/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/Winograd.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

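/** Fixture that runs a Winograd-based convolution function and validates it against the
 *  reference convolution layer, optionally followed by an activation layer.
 *
 *  Typical usage (illustrative sketch only; the exact fixture aliases, function types and
 *  datasets live in the backend-specific test files, e.g. for the CL backend):
 *
 *    using CLWinogradConvolutionLayerFixture =
 *        WinogradConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, float>;
 *    FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, ...)
 */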
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_UNUSED(dilation);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info)
    {
        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        if(use_bias)
        {
            fill(bias, 2, -1.f, 1.f);
        }
        else
        {
            fill(bias, 2, 0.f, 0.f);
        }

        SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);

        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

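/** Fixture that runs a Winograd convolution function with fast math enabled and validates it
 *  against a reference built from the decomposed Winograd stages: input transform, filter
 *  transform, batched GEMM and output transform (4x4 output tile), plus an optional activation.
 */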
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_UNUSED(dilation);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info)
    {
        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        if(use_bias)
        {
            fill(bias, 2, -1.f, 1.f);
        }
        else
        {
            fill(bias, 2, 0.f, 0.f);
        }

        WinogradInfo winograd_info(Size2D(4U, 4U),
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication
        SimpleTensor<T> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution
        SimpleTensor<T> input_transform_out  = reference::winograd_input_transform<T>(src, input_transform_shape, winograd_info);
        SimpleTensor<T> filter_transform_out = reference::winograd_filter_transform<T>(weights, filter_transform_shape, winograd_info);
        SimpleTensor<T> batched_gemm         = reference::gemm<T>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T> conv_out             = reference::winograd_output_transform<T>(batched_gemm, bias, output_transform_shape, winograd_info);

        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

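/** Fixture that validates the Winograd input transform stage in isolation against
 *  reference::winograd_input_transform, for a given WinogradInfo and data layout.
 */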
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType transf;
        transf.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        // Compute Winograd input transform function
        transf.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

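/** Fixture that validates the Winograd filter transform stage in isolation against
 *  reference::winograd_filter_transform, for a given output tile size and data layout.
 */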
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
    {
        WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
        TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType filter_transform;
        filter_transform.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        filter_transform.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

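/** Fixture that validates the Winograd output transform stage in isolation against
 *  reference::winograd_output_transform; the bias is filled with zeros so that only the
 *  transform itself is checked.
 */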
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _target    = compute_target(input_shape, output_shape, winograd_info, data_type);
        _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
    {
        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);

        // Create and configure function
        FunctionType output_transform;
        output_transform.configure(&src, nullptr, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        output_transform.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type };
        SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(bias, 1, 0.0f, 0.0f); // Fill with zeros as we validate just the output transform without bias contribution

        return reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */