/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/Winograd.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

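/** Generic validation fixture for Winograd-based convolution layers.
 *
 * Configures and runs the function under test (FunctionType) on randomly filled tensors and keeps
 * the result in _target, while _reference holds the output of the plain reference convolution
 * (plus the optional activation). Backend test suites typically instantiate it roughly as sketched
 * below; the CL types are only an example and the dataset/tolerance names are placeholders:
 *
 *   template <typename T>
 *   using CLWinogradConvolutionLayerFixture = WinogradConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, T>;
 *
 *   FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, SmallWinogradDataset)
 *   {
 *       validate(CLAccessor(_target), _reference, tolerance_f32);
 *   }
 */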
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_UNUSED(dilation);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info)
    {
        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        if(use_bias)
        {
            fill(bias, 2, -1.f, 1.f);
        }
        else
        {
            fill(bias, 2, 0.f, 0.f);
        }

        SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);

        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

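/** Validation fixture for Winograd-based convolution layers with fast math enabled.
 *
 * Differs from the fixture above in that validate()/configure() are called with fast math enabled
 * and the reference is assembled from the Winograd reference primitives (input transform, filter
 * transform, batched GEMM, output transform) instead of the direct reference convolution.
 */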
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_UNUSED(dilation);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info)
    {
        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        if(use_bias)
        {
            fill(bias, 2, -1.f, 1.f);
        }
        else
        {
            fill(bias, 2, 0.f, 0.f);
        }

        WinogradInfo winograd_info(Size2D(4U, 4U),
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication
        SimpleTensor<T> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution
        SimpleTensor<T> input_transform_out  = reference::winograd_input_transform<T>(src, input_transform_shape, winograd_info);
        SimpleTensor<T> filter_transform_out = reference::winograd_filter_transform<T>(weights, filter_transform_shape, winograd_info);
        SimpleTensor<T> batched_gemm         = reference::gemm<T>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T> conv_out             = reference::winograd_output_transform<T>(batched_gemm, bias, output_transform_shape, winograd_info);

        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

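/** Validation fixture for the Winograd input transform.
 *
 * Runs the transform function under test on a randomly filled input tensor and compares the result
 * against reference::winograd_input_transform for the same WinogradInfo and data layout.
 */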
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType transf;
        transf.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        // Compute Winograd input transform function
        transf.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

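/** Validation fixture for the Winograd filter transform.
 *
 * Builds the WinogradInfo from the requested output tile and the weight dimensions, permutes the
 * weight shape when the NHWC layout is requested, and compares the transformed weights against
 * reference::winograd_filter_transform.
 */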
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
    {
        WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
        TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo());

        // Create and configure function
        FunctionType filter_transform;
        filter_transform.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        filter_transform.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo() };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

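/** Validation fixture for the Winograd output transform.
 *
 * The bias passed to the reference is filled with zeros, so only the output transform itself is
 * validated; the reference output is always computed in NCHW layout.
 */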
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type)
    {
        _target    = compute_target(input_shape, winograd_info, data_type);
        _reference = compute_reference(input_shape, winograd_info, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);

        // Create and configure function
        FunctionType output_transform;
        output_transform.configure(&src, nullptr, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        output_transform.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type)
    {
        winograd_info.output_data_layout = DataLayout::NCHW;
        TensorShape output_shape         = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create reference
        SimpleTensor<T> src{ input_shape, data_type };
        SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(bias, 1, 0.0f, 0.0f); // Fill with zeros as we validate just the output transform without bias contribution

        return reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */