blob: 41f16d3a3920beb0cdba539231aba0bee4feae2d [file] [log] [blame]
Pablo Tello89519332017-11-17 11:52:36 +00001/*
Georgios Pinitas9fb11592018-04-26 20:34:58 +01002 * Copyright (c) 2018 ARM Limited.
Pablo Tello89519332017-11-17 11:52:36 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000029#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Pablo Tello89519332017-11-17 11:52:36 +000030#include "tests/AssetsLibrary.h"
31#include "tests/Globals.h"
32#include "tests/IAccessor.h"
33#include "tests/framework/Asserts.h"
34#include "tests/framework/Fixture.h"
Pablo Tello89519332017-11-17 11:52:36 +000035#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000036#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/ConvolutionLayer.h"
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010038#include "tests/validation/reference/GEMM.h"
Giorgio Arena3695f9a2018-04-23 17:41:22 +010039#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000040#include "tests/validation/reference/Utils.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000041#include "tests/validation/reference/Winograd.h"
Pablo Tello89519332017-11-17 11:52:36 +000042
43#include <random>
44
45namespace arm_compute
46{
Pablo Tello89519332017-11-17 11:52:36 +000047namespace test
48{
49namespace validation
50{
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000051using namespace arm_compute::misc::shape_calculator;
52
Andrew Mundy4d9379a2018-03-15 16:47:03 +000053template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
Gian Marco Iodiced2fab732018-03-02 11:18:12 +000054class WinogradConvolutionLayerValidationFixture : public framework::Fixture
Pablo Tello89519332017-11-17 11:52:36 +000055{
56public:
57 template <typename...>
Pablo Tello7df27862018-05-30 11:44:26 +010058 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
59 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +000060 {
Alex Gilday7da29b62018-03-23 14:16:00 +000061 ARM_COMPUTE_UNUSED(dilation);
62
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000063 _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
64 _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
Pablo Tello89519332017-11-17 11:52:36 +000065 }
66
67protected:
68 template <typename U>
69 void fill(U &&tensor, int i, float min, float max)
70 {
71 switch(tensor.data_type())
72 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +010073 case DataType::F16:
Pablo Tello89519332017-11-17 11:52:36 +000074 case DataType::F32:
75 {
76 std::uniform_real_distribution<> distribution(min, max);
77 library->fill(tensor, distribution, i);
78 break;
79 }
80 default:
81 {
82 ARM_COMPUTE_ERROR("Not supported");
83 library->fill_tensor_uniform(tensor, i);
84 break;
85 }
86 }
87 }
88
Pablo Tello7df27862018-05-30 11:44:26 +010089 TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000090 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +000091 {
92 // Create tensors
Gian Marco Iodiced2fab732018-03-02 11:18:12 +000093 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
94 TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
95 TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
96 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
Pablo Tello89519332017-11-17 11:52:36 +000097
98 // Create and configure function
99 FunctionType conv;
Vidhya Sudhan Loganathan84ce1f92018-04-25 13:00:09 +0100100 ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000101 conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
Pablo Tello89519332017-11-17 11:52:36 +0000102
103 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
104 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
105 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
106 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
107
108 // Allocate tensors
109 src.allocator()->allocate();
110 weights.allocator()->allocate();
Pablo Tello89519332017-11-17 11:52:36 +0000111 dst.allocator()->allocate();
Pablo Tellod6ca4782018-01-23 09:36:04 +0000112 bias.allocator()->allocate();
Pablo Tello89519332017-11-17 11:52:36 +0000113
114 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
115 ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
116 ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
117 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
118
119 // Fill tensors
120 fill(AccessorType(src), 0, -1.f, 1.f);
121 fill(AccessorType(weights), 1, -1.f, 1.f);
Pablo Tellod6ca4782018-01-23 09:36:04 +0000122 fill(AccessorType(bias), 2, -1.f, 1.f);
Pablo Tello89519332017-11-17 11:52:36 +0000123
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000124 // Compute Winograd Convolution function
Pablo Tello89519332017-11-17 11:52:36 +0000125 conv.run();
126
127 return dst;
128 }
129
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000130 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000131 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +0000132 {
133 // Create reference
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000134 SimpleTensor<T> src{ input_shape, data_type, 1 };
135 SimpleTensor<T> weights{ weights_shape, data_type, 1 };
136 SimpleTensor<T> bias{ bias_shape, data_type, 1 };
Pablo Tello89519332017-11-17 11:52:36 +0000137
138 // Fill reference
139 fill(src, 0, -1.f, 1.f);
140 fill(weights, 1, -1.f, 1.f);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000141 if(use_bias)
142 {
143 fill(bias, 2, -1.f, 1.f);
144 }
145 else
146 {
147 fill(bias, 2, 0.f, 0.f);
148 }
Pablo Tello89519332017-11-17 11:52:36 +0000149
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000150 SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
151
152 return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
Pablo Tello89519332017-11-17 11:52:36 +0000153 }
154
155 TensorType _target{};
156 SimpleTensor<T> _reference{};
Pablo Tello89519332017-11-17 11:52:36 +0000157};
158
/** Validation fixture for Winograd convolution layers with fast math enabled.
 *
 * The target path runs the function under test with the fast-math hint set,
 * optionally in NHWC (tensor shapes are permuted accordingly). The reference
 * path re-implements the Winograd pipeline explicitly: input transform,
 * filter transform, batched GEMM and output transform, followed by an
 * optional activation.
 *
 * @tparam use_bias If false, no bias is passed to the function under test and
 *                  a zero-filled bias is used by the reference path.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)

    {
        // Dilation is part of the common dataset signature but is not exercised here.
        ARM_COMPUTE_UNUSED(dilation);
        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
    /** Fill @p tensor with values drawn uniformly from [min, max].
     *  The seed index @p i is shared between target and reference so both see identical data. */
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    /** Run the function under test (fast math enabled) and return its output tensor. */
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout)
    {
        // Shapes come in NCHW order; rotate them when the test runs in NHWC
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors (bias content is irrelevant when use_bias is false: the function receives nullptr)
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    /** Compute the expected output by running the Winograd stages through the reference implementations. */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, data_type, 1 };

        // Fill reference (same seed indices as the target path)
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        if(use_bias)
        {
            fill(bias, 2, -1.f, 1.f);
        }
        else
        {
            // Zero bias mirrors the nullptr bias of the target path
            fill(bias, 2, 0.f, 0.f);
        }

        // Set output tile: 4x4 by default, collapsed to 1 along a unit filter dimension
        Size2D output_tile(4U, 4U);
        if(weights_shape[0] == 1)
        {
            output_tile.width = 1;
        }
        else if(weights_shape[1] == 1)
        {
            output_tile.height = 1;
        }

        WinogradInfo winograd_info(output_tile,
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        // The GEMM output keeps the input-transform shape but with the filter-transform leading dimension
        TensorShape batched_gemm_shape = input_transform_shape;
        batched_gemm_shape[0]          = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication (beta == 0 so its content is ignored)
        SimpleTensor<T> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution: input/filter transforms, batched GEMM, output transform
        SimpleTensor<T> input_transform_out  = reference::winograd_input_transform<T>(src, input_transform_shape, winograd_info);
        SimpleTensor<T> filter_transform_out = reference::winograd_filter_transform<T>(weights, filter_transform_shape, winograd_info);
        SimpleTensor<T> batched_gemm         = reference::gemm<T>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T> conv_out             = reference::winograd_output_transform<T>(batched_gemm, bias, output_transform_shape, winograd_info);

        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
303
304template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000305class WinogradInputTransformValidationFixture : public framework::Fixture
306{
307public:
308 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000309 void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000310 {
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000311 TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000312
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000313 _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
314 _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000315 }
316
317protected:
318 template <typename U>
319 void fill(U &&tensor, int i, float min, float max)
320 {
321 switch(tensor.data_type())
322 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100323 case DataType::F16:
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000324 case DataType::F32:
325 {
326 std::uniform_real_distribution<> distribution(min, max);
327 library->fill(tensor, distribution, i);
328 break;
329 }
330 default:
331 {
332 ARM_COMPUTE_ERROR("Not supported");
333 library->fill_tensor_uniform(tensor, i);
334 break;
335 }
336 }
337 }
338
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100339 TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000340 {
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100341 if(data_layout == DataLayout::NHWC)
342 {
343 permute(input_shape, PermutationVector(2U, 0U, 1U));
344 }
345
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100346 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
347 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000348
349 // Create and configure function
350 FunctionType transf;
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000351 transf.configure(&src, &dst, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000352
353 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
354 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
355
356 // Allocate tensors
357 src.allocator()->allocate();
358 dst.allocator()->allocate();
359
360 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
361 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
362
363 // Fill tensors
364 fill(AccessorType(src), 0, -1.f, 1.f);
365
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000366 // Compute Winograd input transform function
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000367 transf.run();
368
369 return dst;
370 }
371
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000372 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000373 {
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000374 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100375 SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000376
377 // Fill reference
378 fill(src, 0, -1.f, 1.f);
379
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000380 return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000381 }
382
383 TensorType _target{};
384 SimpleTensor<T> _reference{};
385};
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000386
387template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
388class WinogradFilterTransformValidationFixture : public framework::Fixture
389{
390public:
391 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000392 void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000393 {
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000394 WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
395 TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000396
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000397 _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
398 _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000399 }
400
401protected:
402 template <typename U>
403 void fill(U &&tensor, int i, float min, float max)
404 {
405 switch(tensor.data_type())
406 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100407 case DataType::F16:
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000408 case DataType::F32:
409 {
410 std::uniform_real_distribution<> distribution(min, max);
411 library->fill(tensor, distribution, i);
412 break;
413 }
414 default:
415 {
416 ARM_COMPUTE_ERROR("Not supported");
417 library->fill_tensor_uniform(tensor, i);
418 break;
419 }
420 }
421 }
422
Giorgio Arenadcb5b282018-04-25 12:07:29 +0100423 TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000424 {
Giorgio Arenadcb5b282018-04-25 12:07:29 +0100425 if(data_layout == DataLayout::NHWC)
426 {
427 permute(input_shape, PermutationVector(2U, 0U, 1U));
428 }
429
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000430 // Create tensors
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100431 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
432 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000433
434 // Create and configure function
435 FunctionType filter_transform;
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000436 filter_transform.configure(&src, &dst, winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000437
438 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
439 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
440
441 // Allocate tensors
442 src.allocator()->allocate();
443 dst.allocator()->allocate();
444
445 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
446 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
447
448 // Fill tensors
449 fill(AccessorType(src), 0, -1.f, 1.f);
450
451 filter_transform.run();
452
453 return dst;
454 }
455
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000456 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000457 {
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000458 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100459 SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000460
461 // Fill reference
462 fill(src, 0, -1.f, 1.f);
463
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000464 return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000465 }
466
467 TensorType _target{};
468 SimpleTensor<T> _reference{};
469};
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000470
471template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
472class WinogradOutputTransformValidationFixture : public framework::Fixture
473{
474public:
475 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000476 void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type)
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000477 {
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100478 _target = compute_target(input_shape, winograd_info, data_type);
479 _reference = compute_reference(input_shape, winograd_info, data_type);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000480 }
481
482protected:
483 template <typename U>
484 void fill(U &&tensor, int i, float min, float max)
485 {
486 switch(tensor.data_type())
487 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100488 case DataType::F16:
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000489 case DataType::F32:
490 {
491 std::uniform_real_distribution<> distribution(min, max);
492 library->fill(tensor, distribution, i);
493 break;
494 }
495 default:
496 {
497 ARM_COMPUTE_ERROR("Not supported");
498 library->fill_tensor_uniform(tensor, i);
499 break;
500 }
501 }
502 }
503
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100504 TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type)
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000505 {
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100506 TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
507
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000508 // Create tensors
Giorgio Arenaea55f912018-07-12 15:41:35 +0100509 TensorType src = create_tensor<TensorType>(input_shape, data_type);
510 TensorType bias = create_tensor<TensorType>(output_shape[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)], data_type);
511 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000512
513 // Create and configure function
514 FunctionType output_transform;
Giorgio Arenaea55f912018-07-12 15:41:35 +0100515 output_transform.configure(&src, &bias, &dst, winograd_info);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000516
517 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100518 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000519 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
520
521 // Allocate tensors
522 src.allocator()->allocate();
Giorgio Arenaea55f912018-07-12 15:41:35 +0100523 bias.allocator()->allocate();
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000524 dst.allocator()->allocate();
525
526 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100527 ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000528 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
529
530 // Fill tensors
531 fill(AccessorType(src), 0, -1.f, 1.f);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100532 fill(AccessorType(bias), 1, -1.f, 1.f);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000533
534 output_transform.run();
535
536 return dst;
537 }
538
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100539 SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type)
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000540 {
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100541 winograd_info.output_data_layout = DataLayout::NCHW;
542 TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
543
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000544 // Create reference
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000545 SimpleTensor<T> src{ input_shape, data_type };
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +0100546 SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000547
548 // Fill reference
549 fill(src, 0, -1.f, 1.f);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100550 fill(bias, 1, -1.f, 1.f);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000551
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +0100552 return reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000553 }
554
555 TensorType _target{};
556 SimpleTensor<T> _reference{};
557};
Pablo Tello89519332017-11-17 11:52:36 +0000558} // namespace validation
559} // namespace test
560} // namespace arm_compute
561#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */