blob: 410c2a544e17afbf6a1738480340af35845f3c83 [file] [log] [blame]
/*
 * Copyright (c) 2018-2019 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000029#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Pablo Tello89519332017-11-17 11:52:36 +000030#include "tests/AssetsLibrary.h"
31#include "tests/Globals.h"
32#include "tests/IAccessor.h"
33#include "tests/framework/Asserts.h"
34#include "tests/framework/Fixture.h"
Pablo Tello89519332017-11-17 11:52:36 +000035#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000036#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/ConvolutionLayer.h"
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010038#include "tests/validation/reference/GEMM.h"
Giorgio Arena3695f9a2018-04-23 17:41:22 +010039#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000040#include "tests/validation/reference/Utils.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000041#include "tests/validation/reference/Winograd.h"
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +000042#include "utils/Utils.h"
Pablo Tello89519332017-11-17 11:52:36 +000043
44#include <random>
45
46namespace arm_compute
47{
Pablo Tello89519332017-11-17 11:52:36 +000048namespace test
49{
50namespace validation
51{
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000052using namespace arm_compute::misc::shape_calculator;
53
Andrew Mundy4d9379a2018-03-15 16:47:03 +000054template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
Gian Marco Iodiced2fab732018-03-02 11:18:12 +000055class WinogradConvolutionLayerValidationFixture : public framework::Fixture
Pablo Tello89519332017-11-17 11:52:36 +000056{
57public:
58 template <typename...>
Pablo Tello7df27862018-05-30 11:44:26 +010059 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
60 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +000061 {
Alex Gilday7da29b62018-03-23 14:16:00 +000062 ARM_COMPUTE_UNUSED(dilation);
63
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000064 _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
65 _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
Pablo Tello89519332017-11-17 11:52:36 +000066 }
67
68protected:
69 template <typename U>
70 void fill(U &&tensor, int i, float min, float max)
71 {
72 switch(tensor.data_type())
73 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +010074 case DataType::F16:
Pablo Tello89519332017-11-17 11:52:36 +000075 case DataType::F32:
76 {
77 std::uniform_real_distribution<> distribution(min, max);
78 library->fill(tensor, distribution, i);
79 break;
80 }
81 default:
82 {
83 ARM_COMPUTE_ERROR("Not supported");
Pablo Tello89519332017-11-17 11:52:36 +000084 }
85 }
86 }
87
Pablo Tello7df27862018-05-30 11:44:26 +010088 TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000089 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +000090 {
91 // Create tensors
Gian Marco Iodiced2fab732018-03-02 11:18:12 +000092 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
93 TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
94 TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
95 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
Pablo Tello89519332017-11-17 11:52:36 +000096
97 // Create and configure function
98 FunctionType conv;
Vidhya Sudhan Loganathan84ce1f92018-04-25 13:00:09 +010099 ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000100 conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
Pablo Tello89519332017-11-17 11:52:36 +0000101
102 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
103 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
104 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
105 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
106
107 // Allocate tensors
108 src.allocator()->allocate();
109 weights.allocator()->allocate();
Pablo Tello89519332017-11-17 11:52:36 +0000110 dst.allocator()->allocate();
Pablo Tellod6ca4782018-01-23 09:36:04 +0000111 bias.allocator()->allocate();
Pablo Tello89519332017-11-17 11:52:36 +0000112
113 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
114 ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
115 ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
116 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
117
118 // Fill tensors
119 fill(AccessorType(src), 0, -1.f, 1.f);
120 fill(AccessorType(weights), 1, -1.f, 1.f);
Pablo Tellod6ca4782018-01-23 09:36:04 +0000121 fill(AccessorType(bias), 2, -1.f, 1.f);
Pablo Tello89519332017-11-17 11:52:36 +0000122
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000123 // Compute Winograd Convolution function
Pablo Tello89519332017-11-17 11:52:36 +0000124 conv.run();
125
126 return dst;
127 }
128
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000129 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000130 DataType data_type, ActivationLayerInfo act_info)
Pablo Tello89519332017-11-17 11:52:36 +0000131 {
132 // Create reference
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000133 SimpleTensor<T> src{ input_shape, data_type, 1 };
134 SimpleTensor<T> weights{ weights_shape, data_type, 1 };
135 SimpleTensor<T> bias{ bias_shape, data_type, 1 };
Pablo Tello89519332017-11-17 11:52:36 +0000136
137 // Fill reference
138 fill(src, 0, -1.f, 1.f);
139 fill(weights, 1, -1.f, 1.f);
Andrew Mundy4d9379a2018-03-15 16:47:03 +0000140 if(use_bias)
141 {
142 fill(bias, 2, -1.f, 1.f);
143 }
144 else
145 {
146 fill(bias, 2, 0.f, 0.f);
147 }
Pablo Tello89519332017-11-17 11:52:36 +0000148
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000149 SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
150
151 return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
Pablo Tello89519332017-11-17 11:52:36 +0000152 }
153
154 TensorType _target{};
155 SimpleTensor<T> _reference{};
Pablo Tello89519332017-11-17 11:52:36 +0000156};
157
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    /** Set up the test: run the fast-math Winograd convolution on the target and
     *  build the expected result by chaining the reference Winograd stages
     *  (input transform -> filter transform -> batched GEMM -> output transform).
     *
     *  T is the tensor data type of target and reference; T1 is the type the
     *  reference pipeline computes in (defaults to T).
     */
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)

    {
        // Dilation belongs to the shared dataset interface; not used by this fixture
        ARM_COMPUTE_UNUSED(dilation);
        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
    }

protected:
    /** Fill a tensor with uniformly distributed random values in [min, max]; i seeds the generator. */
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                // Dedicated FP16 distribution: std::uniform_real_distribution is not usable with half
                arm_compute::utils::uniform_real_distribution_fp16 distribution((half)min, (half)max);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    /** Run the target fast-math Winograd convolution and return its output tensor. */
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout)
    {
        // Shapes arrive in NCHW order; rotate them when exercising the NHWC path
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function (fast math enabled)
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        // NOTE(review): range is narrower than the non-fast-math fixture ([-0.5, 0.5] vs [-1, 1]) —
        // presumably to keep fast-math numerical error within tolerance; confirm against validation config
        fill(AccessorType(src), 0, -0.5f, 0.5f);
        fill(AccessorType(weights), 1, -0.5f, 0.5f);
        fill(AccessorType(bias), 2, -0.5f, 0.5f);

        // Compute Winograd Convolution function
        conv.run();

        return dst;
    }

    /** Build the expected output by running every reference Winograd stage in T1
     *  precision, then converting the result back to T before the optional activation.
     */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference (filled in T, then copied to the T1 compute type)
        SimpleTensor<T> src_t{ input_shape, data_type, 1 };
        SimpleTensor<T> weights_t{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias_t{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src_t, 0, -0.5f, 0.5f);
        SimpleTensor<T1> src_t1(copy_tensor<T1, T>(src_t));

        fill(weights_t, 1, -0.5f, 0.5f);
        SimpleTensor<T1> weights_t1(copy_tensor<T1, T>(weights_t));
        if(use_bias)
        {
            fill(bias_t, 2, -0.5f, 0.5f);
        }
        else
        {
            // Bias disabled: fill with zeros so it contributes nothing to the result
            fill(bias_t, 2, 0.f, 0.f);
        }
        SimpleTensor<T1> bias_t1(copy_tensor<T1, T>(bias_t));

        // Set output tile: 4x4 by default, shrunk for 1-D / 7-tap kernel shapes below
        Size2D output_tile(4U, 4U);
        if(weights_shape[0] == 7 && weights_shape[1] == 1)
        {
            output_tile.width  = 2;
            output_tile.height = 1;
        }
        else if(weights_shape[0] == 1 && weights_shape[1] == 7)
        {
            output_tile.width  = 1;
            output_tile.height = 2;
        }
        else if(weights_shape[0] == 1)
        {
            output_tile.width = 1;
        }
        else if(weights_shape[1] == 1)
        {
            output_tile.height = 1;
        }

        WinogradInfo winograd_info(output_tile,
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src_t1.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication (beta == 0, so its values are irrelevant)
        SimpleTensor<T1> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution
        SimpleTensor<T1> input_transform_out = reference::winograd_input_transform<T1>(src_t1, input_transform_shape, winograd_info);

        SimpleTensor<T1> filter_transform_out = reference::winograd_filter_transform<T1>(weights_t1, filter_transform_shape, winograd_info);
        SimpleTensor<T1> batched_gemm         = reference::gemm<T1>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T1> conv_out             = reference::winograd_output_transform<T1>(batched_gemm, bias_t1, output_transform_shape, winograd_info);
        // Convert the T1 result back to T before the (optional) fused activation
        SimpleTensor<T> conv_out_t(std::move(copy_tensor<T, T1>(conv_out)));
        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out_t, act_info) : conv_out_t;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
320
321template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000322class WinogradInputTransformValidationFixture : public framework::Fixture
323{
324public:
325 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000326 void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000327 {
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000328 TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000329
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000330 _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
Michalis Spyrou6bff1952019-10-02 17:22:11 +0100331 _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000332 }
333
334protected:
335 template <typename U>
336 void fill(U &&tensor, int i, float min, float max)
337 {
338 switch(tensor.data_type())
339 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100340 case DataType::F16:
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000341 case DataType::F32:
342 {
343 std::uniform_real_distribution<> distribution(min, max);
344 library->fill(tensor, distribution, i);
345 break;
346 }
347 default:
348 {
349 ARM_COMPUTE_ERROR("Not supported");
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000350 }
351 }
352 }
353
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100354 TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000355 {
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100356 if(data_layout == DataLayout::NHWC)
357 {
358 permute(input_shape, PermutationVector(2U, 0U, 1U));
359 }
360
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100361 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
362 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000363
364 // Create and configure function
365 FunctionType transf;
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000366 transf.configure(&src, &dst, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000367
368 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
369 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
370
371 // Allocate tensors
372 src.allocator()->allocate();
373 dst.allocator()->allocate();
374
375 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
376 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
377
378 // Fill tensors
379 fill(AccessorType(src), 0, -1.f, 1.f);
380
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000381 // Compute Winograd input transform function
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000382 transf.run();
383
384 return dst;
385 }
386
Michalis Spyrou6bff1952019-10-02 17:22:11 +0100387 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000388 {
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000389 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100390 SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000391
392 // Fill reference
393 fill(src, 0, -1.f, 1.f);
394
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000395 return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000396 }
397
398 TensorType _target{};
399 SimpleTensor<T> _reference{};
400};
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000401
402template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
403class WinogradFilterTransformValidationFixture : public framework::Fixture
404{
405public:
406 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000407 void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000408 {
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000409 WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
410 TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000411
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000412 _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
Michalis Spyrou6bff1952019-10-02 17:22:11 +0100413 _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000414 }
415
416protected:
417 template <typename U>
418 void fill(U &&tensor, int i, float min, float max)
419 {
420 switch(tensor.data_type())
421 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100422 case DataType::F16:
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000423 case DataType::F32:
424 {
425 std::uniform_real_distribution<> distribution(min, max);
426 library->fill(tensor, distribution, i);
427 break;
428 }
429 default:
430 {
431 ARM_COMPUTE_ERROR("Not supported");
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000432 }
433 }
434 }
435
Giorgio Arenadcb5b282018-04-25 12:07:29 +0100436 TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000437 {
Giorgio Arenadcb5b282018-04-25 12:07:29 +0100438 if(data_layout == DataLayout::NHWC)
439 {
440 permute(input_shape, PermutationVector(2U, 0U, 1U));
441 }
442
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000443 // Create tensors
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100444 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
445 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000446
447 // Create and configure function
448 FunctionType filter_transform;
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000449 filter_transform.configure(&src, &dst, winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000450
451 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
452 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
453
454 // Allocate tensors
455 src.allocator()->allocate();
456 dst.allocator()->allocate();
457
458 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
459 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
460
461 // Fill tensors
462 fill(AccessorType(src), 0, -1.f, 1.f);
463
464 filter_transform.run();
465
466 return dst;
467 }
468
Michalis Spyrou6bff1952019-10-02 17:22:11 +0100469 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000470 {
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000471 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100472 SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000473
474 // Fill reference
475 fill(src, 0, -1.f, 1.f);
476
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000477 return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000478 }
479
480 TensorType _target{};
481 SimpleTensor<T> _reference{};
482};
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000483
484template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
485class WinogradOutputTransformValidationFixture : public framework::Fixture
486{
487public:
488 template <typename...>
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100489 void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000490 {
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100491 _target = compute_target(input_shape, winograd_info, data_type, act_info);
492 _reference = compute_reference(input_shape, winograd_info, data_type, act_info);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000493 }
494
495protected:
496 template <typename U>
497 void fill(U &&tensor, int i, float min, float max)
498 {
499 switch(tensor.data_type())
500 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100501 case DataType::F16:
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000502 case DataType::F32:
503 {
504 std::uniform_real_distribution<> distribution(min, max);
505 library->fill(tensor, distribution, i);
506 break;
507 }
508 default:
509 {
510 ARM_COMPUTE_ERROR("Not supported");
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000511 }
512 }
513 }
514
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100515 TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type, ActivationLayerInfo act_info)
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000516 {
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100517 TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
518
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000519 // Create tensors
Giorgio Arenaea55f912018-07-12 15:41:35 +0100520 TensorType src = create_tensor<TensorType>(input_shape, data_type);
521 TensorType bias = create_tensor<TensorType>(output_shape[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)], data_type);
522 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000523
524 // Create and configure function
525 FunctionType output_transform;
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100526 output_transform.configure(&src, &bias, &dst, winograd_info, act_info);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000527
528 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100529 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000530 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
531
532 // Allocate tensors
533 src.allocator()->allocate();
Giorgio Arenaea55f912018-07-12 15:41:35 +0100534 bias.allocator()->allocate();
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000535 dst.allocator()->allocate();
536
537 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100538 ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000539 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
540
541 // Fill tensors
542 fill(AccessorType(src), 0, -1.f, 1.f);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100543 fill(AccessorType(bias), 1, -1.f, 1.f);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000544
545 output_transform.run();
546
547 return dst;
548 }
549
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100550 SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info)
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000551 {
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100552 winograd_info.output_data_layout = DataLayout::NCHW;
553 TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
554
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000555 // Create reference
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000556 SimpleTensor<T> src{ input_shape, data_type };
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +0100557 SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000558
559 // Fill reference
560 fill(src, 0, -1.f, 1.f);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100561 fill(bias, 1, -1.f, 1.f);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000562
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100563 const SimpleTensor<T> winograd_output = reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);
564
565 return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000566 }
567
568 TensorType _target{};
569 SimpleTensor<T> _reference{};
570};
Pablo Tello89519332017-11-17 11:52:36 +0000571} // namespace validation
572} // namespace test
573} // namespace arm_compute
574#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */