/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/Winograd.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

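// Validation fixture for Winograd-based convolution: the function under test runs on randomly
// filled tensors and its output is compared against the generic reference convolution
// (reference::convolution_layer), optionally followed by an activation layer. When use_bias is
// false, the reference bias is filled with zeros.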
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_UNUSED(dilation);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
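    // Fill a tensor with values drawn from a uniform real distribution in [min, max].
    // Only floating-point data types (F16/F32) are supported by these fixtures.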
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info)
    {
        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        if(use_bias)
        {
            fill(bias, 2, -1.f, 1.f);
        }
        else
        {
            fill(bias, 2, 0.f, 0.f);
        }

        SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);

        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

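// Validation fixture for Winograd convolution with fast math enabled. Tensor shapes are permuted
// when the requested data layout is NHWC, and the reference output is built from the decomposed
// Winograd pipeline: input transform, filter transform, batched GEMM and output transform.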
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
    {
        ARM_COMPUTE_UNUSED(dilation);
        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        if(use_bias)
        {
            fill(bias, 2, -1.f, 1.f);
        }
        else
        {
            fill(bias, 2, 0.f, 0.f);
        }

        // Set output tile
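        // The tile size depends on the kernel shape: 4x4 by default, 2x1 for 7x1 kernels,
        // 1x2 for 1x7 kernels, and a single row or column for other 1-D kernels.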
        Size2D output_tile(4U, 4U);
        if(weights_shape[0] == 7 && weights_shape[1] == 1)
        {
            output_tile.width  = 2;
            output_tile.height = 1;
        }
        else if(weights_shape[0] == 1 && weights_shape[1] == 7)
        {
            output_tile.width  = 1;
            output_tile.height = 2;
        }
        else if(weights_shape[0] == 1)
        {
            output_tile.width = 1;
        }
        else if(weights_shape[1] == 1)
        {
            output_tile.height = 1;
        }

        WinogradInfo winograd_info(output_tile,
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication
        SimpleTensor<T> dummy_c{ batched_gemm_shape, data_type, 1 };
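        // The GEMM below runs with beta = 0.0f, so dummy_c does not contribute to the result;
        // it only satisfies the reference gemm() interface.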

        // Compute Winograd-based convolution
        SimpleTensor<T> input_transform_out  = reference::winograd_input_transform<T>(src, input_transform_shape, winograd_info);
        SimpleTensor<T> filter_transform_out = reference::winograd_filter_transform<T>(weights, filter_transform_shape, winograd_info);
        SimpleTensor<T> batched_gemm         = reference::gemm<T>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T> conv_out             = reference::winograd_output_transform<T>(batched_gemm, bias, output_transform_shape, winograd_info);

        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

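// Validation fixture for the standalone Winograd input transform, validated against
// reference::winograd_input_transform.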
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
        }

        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());

        // Create and configure function
        FunctionType transf;
        transf.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        // Compute Winograd input transform function
        transf.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

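// Validation fixture for the standalone Winograd filter (weights) transform, validated against
// reference::winograd_filter_transform.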
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
    {
        WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
        TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());

        // Create and configure function
        FunctionType filter_transform;
        filter_transform.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        filter_transform.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

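// Validation fixture for the standalone Winograd output transform (including the bias addition),
// validated against reference::winograd_output_transform.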
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type)
    {
        _target    = compute_target(input_shape, winograd_info, data_type);
        _reference = compute_reference(input_shape, winograd_info, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create tensors
        TensorType src  = create_tensor<TensorType>(input_shape, data_type);
        TensorType bias = create_tensor<TensorType>(output_shape[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)], data_type);
        TensorType dst  = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);

        // Create and configure function
        FunctionType output_transform;
        output_transform.configure(&src, &bias, &dst, winograd_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(bias), 1, -1.f, 1.f);

        output_transform.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type)
    {
        winograd_info.output_data_layout = DataLayout::NCHW;
        TensorShape output_shape         = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create reference
        SimpleTensor<T> src{ input_shape, data_type };
        SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(bias, 1, -1.f, 1.f);

        return reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
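// These fixtures are bound to a concrete backend in the Winograd test suites, e.g. (illustrative
// only; the real typedefs and datasets live next to the CL/NEON test cases):
//
//   using CLWinogradConvolutionLayerFastMathFixture =
//       WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, float>;
//
// FIXTURE_DATA_TEST_CASE then drives setup() from a dataset supplying the shapes, pad/stride info,
// data type, activation and data layout, and the test body compares _target against _reference
// using the backend accessor and a floating-point tolerance.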
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */