blob: 8f34654c3a1a54702293139b7394e3402fbdf2d5 [file] [log] [blame]
Pablo Tello89519332017-11-17 11:52:36 +00001/*
Georgios Pinitas9fb11592018-04-26 20:34:58 +01002 * Copyright (c) 2018 ARM Limited.
Pablo Tello89519332017-11-17 11:52:36 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000029#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Pablo Tello89519332017-11-17 11:52:36 +000030#include "tests/AssetsLibrary.h"
31#include "tests/Globals.h"
32#include "tests/IAccessor.h"
33#include "tests/framework/Asserts.h"
34#include "tests/framework/Fixture.h"
Pablo Tello89519332017-11-17 11:52:36 +000035#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000036#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/ConvolutionLayer.h"
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010038#include "tests/validation/reference/GEMM.h"
Giorgio Arena3695f9a2018-04-23 17:41:22 +010039#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000040#include "tests/validation/reference/Utils.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000041#include "tests/validation/reference/Winograd.h"
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +000042#include "utils/Utils.h"
Pablo Tello89519332017-11-17 11:52:36 +000043
44#include <random>
45
46namespace arm_compute
47{
Pablo Tello89519332017-11-17 11:52:36 +000048namespace test
49{
50namespace validation
51{
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000052using namespace arm_compute::misc::shape_calculator;
53
Andrew Mundy4d9379a2018-03-15 16:47:03 +000054template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
Gian Marco Iodiced2fab732018-03-02 11:18:12 +000055class WinogradConvolutionLayerValidationFixture : public framework::Fixture
Pablo Tello89519332017-11-17 11:52:36 +000056{
57public:
58 template <typename...>
Pablo Tello7df27862018-05-30 11:44:26 +010059 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
60 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +000061 {
Alex Gilday7da29b62018-03-23 14:16:00 +000062 ARM_COMPUTE_UNUSED(dilation);
63
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000064 _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
65 _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
Pablo Tello89519332017-11-17 11:52:36 +000066 }
67
68protected:
69 template <typename U>
70 void fill(U &&tensor, int i, float min, float max)
71 {
72 switch(tensor.data_type())
73 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +010074 case DataType::F16:
Pablo Tello89519332017-11-17 11:52:36 +000075 case DataType::F32:
76 {
77 std::uniform_real_distribution<> distribution(min, max);
78 library->fill(tensor, distribution, i);
79 break;
80 }
81 default:
82 {
83 ARM_COMPUTE_ERROR("Not supported");
84 library->fill_tensor_uniform(tensor, i);
85 break;
86 }
87 }
88 }
89
Pablo Tello7df27862018-05-30 11:44:26 +010090 TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000091 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +000092 {
93 // Create tensors
Gian Marco Iodiced2fab732018-03-02 11:18:12 +000094 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
95 TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
96 TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
97 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
Pablo Tello89519332017-11-17 11:52:36 +000098
99 // Create and configure function
100 FunctionType conv;
Vidhya Sudhan Loganathan84ce1f92018-04-25 13:00:09 +0100101 ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000102 conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
Pablo Tello89519332017-11-17 11:52:36 +0000103
104 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
105 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
106 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
107 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
108
109 // Allocate tensors
110 src.allocator()->allocate();
111 weights.allocator()->allocate();
Pablo Tello89519332017-11-17 11:52:36 +0000112 dst.allocator()->allocate();
Pablo Tellod6ca4782018-01-23 09:36:04 +0000113 bias.allocator()->allocate();
Pablo Tello89519332017-11-17 11:52:36 +0000114
115 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
116 ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
117 ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
118 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
119
120 // Fill tensors
121 fill(AccessorType(src), 0, -1.f, 1.f);
122 fill(AccessorType(weights), 1, -1.f, 1.f);
Pablo Tellod6ca4782018-01-23 09:36:04 +0000123 fill(AccessorType(bias), 2, -1.f, 1.f);
Pablo Tello89519332017-11-17 11:52:36 +0000124
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000125 // Compute Winograd Convolution function
Pablo Tello89519332017-11-17 11:52:36 +0000126 conv.run();
127
128 return dst;
129 }
130
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000131 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000132 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +0000133 {
134 // Create reference
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000135 SimpleTensor<T> src{ input_shape, data_type, 1 };
136 SimpleTensor<T> weights{ weights_shape, data_type, 1 };
137 SimpleTensor<T> bias{ bias_shape, data_type, 1 };
Pablo Tello89519332017-11-17 11:52:36 +0000138
139 // Fill reference
140 fill(src, 0, -1.f, 1.f);
141 fill(weights, 1, -1.f, 1.f);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000142 if(use_bias)
143 {
144 fill(bias, 2, -1.f, 1.f);
145 }
146 else
147 {
148 fill(bias, 2, 0.f, 0.f);
149 }
Pablo Tello89519332017-11-17 11:52:36 +0000150
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000151 SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
152
153 return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
Pablo Tello89519332017-11-17 11:52:36 +0000154 }
155
156 TensorType _target{};
157 SimpleTensor<T> _reference{};
Pablo Tello89519332017-11-17 11:52:36 +0000158};
159
/** Validation fixture for Winograd convolution with fast math enabled.
 *
 * The reference path decomposes the convolution into the three Winograd stages
 * (input transform, batched GEMM, output transform) computed in type T1, then
 * converts back to T. T1 is presumably the wider arithmetic type used for the
 * reference (e.g. float when T is half) -- TODO confirm against callers.
 * Supports NCHW and NHWC for the target; the reference works on the
 * un-permuted (NCHW-ordered) shapes.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    /** Set up one test case: compute both the target and the reference output. */
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)

    {
        // Dilation comes from the shared convolution datasets but is not used by Winograd.
        ARM_COMPUTE_UNUSED(dilation);
        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
    }

protected:
    /** Fill @p tensor with uniform values in [min, max]; @p i seeds the library RNG for reproducibility. */
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                // Dedicated fp16 distribution: std::uniform_real_distribution does not support half.
                arm_compute::utils::uniform_real_distribution_fp16 distribution((half)min, (half)max);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                // Only floating-point types are supported by this fixture.
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    /** Configure, allocate, fill and run the fast-math function under test; returns its output tensor. */
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout)
    {
        // Shapes in the datasets are NCHW-ordered; rotate them for an NHWC run.
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function; last argument enables fast math.
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        // Before allocation every tensor must still be resizable
        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        // After allocation no tensor may be resizable any more
        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors (bias is filled regardless; it is ignored when use_bias is false)
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    /** Compute the reference output via the explicit Winograd decomposition in type T1. */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference tensors in the test type T
        SimpleTensor<T> src_t{ input_shape, data_type, 1 };
        SimpleTensor<T> weights_t{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias_t{ bias_shape, data_type, 1 };

        // Fill in T (same seeds as the target), then widen to T1 for the arithmetic
        fill(src_t, 0, -1.f, 1.f);
        SimpleTensor<T1> src_t1(copy_tensor<T1, T>(src_t));

        fill(weights_t, 1, -1.f, 1.f);
        SimpleTensor<T1> weights_t1(copy_tensor<T1, T>(weights_t));
        if(use_bias)
        {
            fill(bias_t, 2, -1.f, 1.f);
        }
        else
        {
            // No bias in the target configuration: a zero bias makes the reference equivalent.
            fill(bias_t, 2, 0.f, 0.f);
        }
        SimpleTensor<T1> bias_t1(copy_tensor<T1, T>(bias_t));

        // Set output tile: default F(4x4, 3x3); narrower tiles for 1-D (1xN / Nx1) kernels,
        // and a reduced 2x1 / 1x2 tile for the large 7x1 / 1x7 kernels.
        Size2D output_tile(4U, 4U);
        if(weights_shape[0] == 7 && weights_shape[1] == 1)
        {
            output_tile.width  = 2;
            output_tile.height = 1;
        }
        else if(weights_shape[0] == 1 && weights_shape[1] == 7)
        {
            output_tile.width  = 1;
            output_tile.height = 2;
        }
        else if(weights_shape[0] == 1)
        {
            output_tile.width = 1;
        }
        else if(weights_shape[1] == 1)
        {
            output_tile.height = 1;
        }

        WinogradInfo winograd_info(output_tile,
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src_t1.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication (beta = 0, so its values are irrelevant)
        SimpleTensor<T1> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution: input transform -> batched GEMM -> output transform
        SimpleTensor<T1> input_transform_out = reference::winograd_input_transform<T1>(src_t1, input_transform_shape, winograd_info);

        SimpleTensor<T1> filter_transform_out = reference::winograd_filter_transform<T1>(weights_t1, filter_transform_shape, winograd_info);
        SimpleTensor<T1> batched_gemm         = reference::gemm<T1>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T1> conv_out             = reference::winograd_output_transform<T1>(batched_gemm, bias_t1, output_transform_shape, winograd_info);
        // Narrow the result back to T before the (optional) activation
        SimpleTensor<T> conv_out_t(std::move(copy_tensor<T, T1>(conv_out)));
        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out_t, act_info) : conv_out_t;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
324
325template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000326class WinogradInputTransformValidationFixture : public framework::Fixture
327{
328public:
329 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000330 void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000331 {
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000332 TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000333
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000334 _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
335 _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000336 }
337
338protected:
339 template <typename U>
340 void fill(U &&tensor, int i, float min, float max)
341 {
342 switch(tensor.data_type())
343 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100344 case DataType::F16:
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000345 case DataType::F32:
346 {
347 std::uniform_real_distribution<> distribution(min, max);
348 library->fill(tensor, distribution, i);
349 break;
350 }
351 default:
352 {
353 ARM_COMPUTE_ERROR("Not supported");
354 library->fill_tensor_uniform(tensor, i);
355 break;
356 }
357 }
358 }
359
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100360 TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000361 {
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100362 if(data_layout == DataLayout::NHWC)
363 {
364 permute(input_shape, PermutationVector(2U, 0U, 1U));
365 }
366
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100367 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
368 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000369
370 // Create and configure function
371 FunctionType transf;
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000372 transf.configure(&src, &dst, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000373
374 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
375 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
376
377 // Allocate tensors
378 src.allocator()->allocate();
379 dst.allocator()->allocate();
380
381 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
382 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
383
384 // Fill tensors
385 fill(AccessorType(src), 0, -1.f, 1.f);
386
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000387 // Compute Winograd input transform function
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000388 transf.run();
389
390 return dst;
391 }
392
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000393 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000394 {
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000395 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100396 SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000397
398 // Fill reference
399 fill(src, 0, -1.f, 1.f);
400
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000401 return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000402 }
403
404 TensorType _target{};
405 SimpleTensor<T> _reference{};
406};
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000407
408template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
409class WinogradFilterTransformValidationFixture : public framework::Fixture
410{
411public:
412 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000413 void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000414 {
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000415 WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
416 TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000417
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000418 _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
419 _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000420 }
421
422protected:
423 template <typename U>
424 void fill(U &&tensor, int i, float min, float max)
425 {
426 switch(tensor.data_type())
427 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100428 case DataType::F16:
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000429 case DataType::F32:
430 {
431 std::uniform_real_distribution<> distribution(min, max);
432 library->fill(tensor, distribution, i);
433 break;
434 }
435 default:
436 {
437 ARM_COMPUTE_ERROR("Not supported");
438 library->fill_tensor_uniform(tensor, i);
439 break;
440 }
441 }
442 }
443
Giorgio Arenadcb5b282018-04-25 12:07:29 +0100444 TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000445 {
Giorgio Arenadcb5b282018-04-25 12:07:29 +0100446 if(data_layout == DataLayout::NHWC)
447 {
448 permute(input_shape, PermutationVector(2U, 0U, 1U));
449 }
450
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000451 // Create tensors
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100452 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
453 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000454
455 // Create and configure function
456 FunctionType filter_transform;
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000457 filter_transform.configure(&src, &dst, winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000458
459 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
460 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
461
462 // Allocate tensors
463 src.allocator()->allocate();
464 dst.allocator()->allocate();
465
466 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
467 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
468
469 // Fill tensors
470 fill(AccessorType(src), 0, -1.f, 1.f);
471
472 filter_transform.run();
473
474 return dst;
475 }
476
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000477 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000478 {
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000479 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100480 SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000481
482 // Fill reference
483 fill(src, 0, -1.f, 1.f);
484
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000485 return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000486 }
487
488 TensorType _target{};
489 SimpleTensor<T> _reference{};
490};
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000491
/** Validation fixture for the Winograd output transform stage (with optional fused activation).
 *
 * NOTE(review): the target derives the bias length from the output shape's
 * channel dimension while the reference uses input_shape[0]; these are
 * presumably equal by construction of the Winograd shapes -- confirm against
 * the shape calculators.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
    /** Set up one test case: compute both the target and the reference output. */
    template <typename...>
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
    {
        _target    = compute_target(input_shape, winograd_info, data_type, act_info);
        _reference = compute_reference(input_shape, winograd_info, data_type, act_info);
    }

protected:
    /** Fill @p tensor with uniform values in [min, max]; @p i seeds the library RNG for reproducibility. */
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                // Only floating-point types are supported by this fixture.
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

    /** Configure, allocate, fill and run the output transform; returns its output tensor. */
    TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type, ActivationLayerInfo act_info)
    {
        TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create tensors; bias length is the channel dimension of the output in its own layout
        TensorType src  = create_tensor<TensorType>(input_shape, data_type);
        TensorType bias = create_tensor<TensorType>(output_shape[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)], data_type);
        TensorType dst  = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);

        // Create and configure function
        FunctionType output_transform;
        output_transform.configure(&src, &bias, &dst, winograd_info, act_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(bias), 1, -1.f, 1.f);

        output_transform.run();

        return dst;
    }

    /** Compute the reference output of the output transform (always in NCHW). */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info)
    {
        // The reference implementation only produces NCHW output.
        winograd_info.output_data_layout = DataLayout::NCHW;
        TensorShape output_shape         = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create reference tensors
        SimpleTensor<T> src{ input_shape, data_type };
        SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };

        // Fill with the same seeds as the target
        fill(src, 0, -1.f, 1.f);
        fill(bias, 1, -1.f, 1.f);

        const SimpleTensor<T> winograd_output = reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);

        return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
Pablo Tello89519332017-11-17 11:52:36 +0000581} // namespace validation
582} // namespace test
583} // namespace arm_compute
584#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */