blob: e99eb910e88568ef2aad80e6c26848cb9e8e4809 [file] [log] [blame]
Pablo Tello89519332017-11-17 11:52:36 +00001/*
Giorgio Arenab309fc22021-01-05 09:46:16 +00002 * Copyright (c) 2018-2021 Arm Limited.
Pablo Tello89519332017-11-17 11:52:36 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000029#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Pablo Tello89519332017-11-17 11:52:36 +000030#include "tests/AssetsLibrary.h"
31#include "tests/Globals.h"
32#include "tests/IAccessor.h"
33#include "tests/framework/Asserts.h"
34#include "tests/framework/Fixture.h"
Pablo Tello89519332017-11-17 11:52:36 +000035#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000036#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/ConvolutionLayer.h"
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +010038#include "tests/validation/reference/GEMM.h"
Giorgio Arena3695f9a2018-04-23 17:41:22 +010039#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000040#include "tests/validation/reference/Utils.h"
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000041#include "tests/validation/reference/Winograd.h"
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +000042#include "utils/Utils.h"
Pablo Tello89519332017-11-17 11:52:36 +000043
44#include <random>
45
46namespace arm_compute
47{
Pablo Tello89519332017-11-17 11:52:36 +000048namespace test
49{
50namespace validation
51{
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +000052using namespace arm_compute::misc::shape_calculator;
53
/** Validation fixture for a complete fast-math Winograd convolution.
 *
 * Runs the backend Winograd convolution function and compares it against a
 * reference built from the individual Winograd stages
 * (input transform -> filter transform -> batched GEMM -> output transform).
 *
 * @tparam TensorType   Backend tensor type under test
 * @tparam AccessorType Accessor matching @p TensorType
 * @tparam FunctionType Winograd convolution function under test
 * @tparam T            Data type the target runs in
 * @tparam T1           Data type the reference runs in (defaults to @p T; a wider
 *                      type can be used to bound fast-math error)
 * @tparam use_bias     If true, a bias tensor is passed to the convolution
 * @tparam mixed_layout If true, data layouts are toggled after configure() to
 *                      exercise multi data-layout graph cases
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)

    {
        // Dilation is part of the shared dataset signature but Winograd only supports dilation 1x1
        ARM_COMPUTE_UNUSED(dilation);
        _mixed_layout = mixed_layout;
        _target       = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
        _reference    = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
    }

protected:
    // Runs @p layer with src/dst layouts flipped (NCHW <-> NHWC) after configuration,
    // then restores the original layouts so the suite validates against the right shapes.
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout = src.info()->data_layout();
        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

    // Fills @p tensor with uniformly distributed values in [min, max] using seed index @p i.
    // The same (i, min, max) triple is reused for target and reference so both see identical data.
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    // Configures, allocates, fills and runs the backend convolution; returns the output tensor.
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout)
    {
        // Dataset shapes are NCHW-ordered; rotate them for NHWC runs
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function; validate() is checked first so a rejected
        // configuration fails the test instead of asserting inside configure()
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Add x-padding to stress non-contiguous row strides
        add_padding_x({ &src, &weights, &bias, &dst }, data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0, -0.5f, 0.5f);
        fill(AccessorType(weights), 1, -0.5f, 0.5f);
        fill(AccessorType(bias), 2, -0.5f, 0.5f);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute function
            conv.run();
        }
        return dst;
    }

    // Builds the reference output by chaining the individual reference Winograd stages in type T1.
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src_t{ input_shape, data_type, 1 };
        SimpleTensor<T> weights_t{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias_t{ bias_shape, data_type, 1 };

        // Fill reference in T first (matching the target's data), then widen to T1
        fill(src_t, 0, -0.5f, 0.5f);
        SimpleTensor<T1> src_t1(copy_tensor<T1, T>(src_t));

        fill(weights_t, 1, -0.5f, 0.5f);
        SimpleTensor<T1> weights_t1(copy_tensor<T1, T>(weights_t));
        if(use_bias)
        {
            fill(bias_t, 2, -0.5f, 0.5f);
        }
        else
        {
            // No-bias case: a zero bias makes the output transform a no-op addition
            fill(bias_t, 2, 0.f, 0.f);
        }
        SimpleTensor<T1> bias_t1(copy_tensor<T1, T>(bias_t));

        // Set output tile: default F(4x4, kxk); special-cased for 7x1/1x7 and other 1-D kernels
        Size2D output_tile(4U, 4U);
        if(weights_shape[0] == 7 && weights_shape[1] == 1)
        {
            output_tile.width  = 2;
            output_tile.height = 1;
        }
        else if(weights_shape[0] == 1 && weights_shape[1] == 7)
        {
            output_tile.width  = 1;
            output_tile.height = 2;
        }
        else if(weights_shape[0] == 1)
        {
            output_tile.width = 1;
        }
        else if(weights_shape[1] == 1)
        {
            output_tile.height = 1;
        }

        WinogradInfo winograd_info(output_tile,
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src_t1.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication (beta == 0 so its values are irrelevant)
        SimpleTensor<T1> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution
        SimpleTensor<T1> input_transform_out = reference::winograd_input_transform<T1>(src_t1, input_transform_shape, winograd_info);

        SimpleTensor<T1> filter_transform_out = reference::winograd_filter_transform<T1>(weights_t1, filter_transform_shape, winograd_info);
        SimpleTensor<T1> batched_gemm         = reference::gemm<T1>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T1> conv_out             = reference::winograd_output_transform<T1>(batched_gemm, bias_t1, output_transform_shape, winograd_info);
        // Narrow the result back to T before the (optional) activation so it matches the target type
        SimpleTensor<T> conv_out_t(std::move(copy_tensor<T, T1>(conv_out)));
        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out_t, act_info) : conv_out_t;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    bool            _mixed_layout{ false };
};
241
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000242template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000243class WinogradInputTransformValidationFixture : public framework::Fixture
244{
245public:
246 template <typename...>
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000247 void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000248 {
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000249 TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
Giorgio Arena63825e82021-03-25 14:54:50 +0000250 _mixed_layout = mixed_layout;
251 _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
252 _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000253 }
254
255protected:
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000256 void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
257 {
258 const DataLayout data_layout_src = src.info()->data_layout();
259 const DataLayout data_layout_dst = dst.info()->data_layout();
260
261 // Test Multi DataLayout graph cases, when the data layout changes after configure
262 src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
263 dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
264
265 // Compute Convolution function
266 layer.run();
267
268 // Reinstating original data layout for the test suite to properly check the values
269 src.info()->set_data_layout(data_layout_src);
270 dst.info()->set_data_layout(data_layout_dst);
271 }
272
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000273 template <typename U>
274 void fill(U &&tensor, int i, float min, float max)
275 {
276 switch(tensor.data_type())
277 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100278 case DataType::F16:
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000279 {
Giorgio Arena33b103b2021-01-08 10:37:15 +0000280 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000281 library->fill(tensor, distribution, i);
282 break;
283 }
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000284 case DataType::F32:
285 {
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000286 std::uniform_real_distribution<float> distribution(min, max);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000287 library->fill(tensor, distribution, i);
288 break;
289 }
290 default:
291 {
292 ARM_COMPUTE_ERROR("Not supported");
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000293 }
294 }
295 }
296
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100297 TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000298 {
Giorgio Arenac42f28d2018-04-26 11:33:05 +0100299 if(data_layout == DataLayout::NHWC)
300 {
301 permute(input_shape, PermutationVector(2U, 0U, 1U));
302 }
303
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100304 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
305 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000306
307 // Create and configure function
308 FunctionType transf;
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000309 transf.configure(&src, &dst, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000310
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100311 ARM_COMPUTE_ASSERT(src.info()->is_resizable());
312 ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000313
Giorgio Arena63825e82021-03-25 14:54:50 +0000314 add_padding_x({ &src, &dst }, data_layout);
315
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000316 // Allocate tensors
317 src.allocator()->allocate();
318 dst.allocator()->allocate();
319
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100320 ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
321 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000322
323 // Fill tensors
324 fill(AccessorType(src), 0, -1.f, 1.f);
325
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000326 if(_mixed_layout)
327 {
328 mix_layout(transf, src, dst);
329 }
330 else
331 {
332 // Compute Winograd input transform function
333 transf.run();
334 }
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000335 return dst;
336 }
337
Michalis Spyrou6bff1952019-10-02 17:22:11 +0100338 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000339 {
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000340 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100341 SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000342
343 // Fill reference
344 fill(src, 0, -1.f, 1.f);
345
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000346 return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000347 }
348
Giorgio Arena63825e82021-03-25 14:54:50 +0000349 bool _mixed_layout{ false };
Giorgio Arena1f9ca1d2018-03-01 11:13:45 +0000350 TensorType _target{};
351 SimpleTensor<T> _reference{};
352};
Gian Marco Iodice7e4b2392018-02-22 16:17:20 +0000353
/** Validation fixture for the stand-alone Winograd filter transform stage.
 *
 * Compares the backend filter-transform function against the reference
 * implementation for the same seeded weights data.
 *
 * @tparam TensorType   Backend tensor type under test
 * @tparam AccessorType Accessor matching @p TensorType
 * @tparam FunctionType Filter-transform function under test
 * @tparam T            Data type
 * @tparam mixed_layout If true, layouts are toggled after configure() to test multi-layout graphs
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
    {
        // Only the output tile and kernel size matter for the filter transform; the other
        // WinogradInfo fields are placeholders
        WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
        TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _mixed_layout = mixed_layout;
        _target       = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference    = compute_reference(input_shape, output_shape, winograd_info, data_type);
    }

protected:
    // Runs @p layer with src/dst layouts flipped (NCHW <-> NHWC) after configuration,
    // then restores the original layouts so the suite validates against the right shapes.
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout_src = src.info()->data_layout();
        const DataLayout data_layout_dst = dst.info()->data_layout();

        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(data_layout_src);
        dst.info()->set_data_layout(data_layout_dst);
    }

    // Fills @p tensor with uniformly distributed values in [min, max] using seed index @p i.
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    // Configures, allocates, fills and runs the backend filter transform; returns its output.
    TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        // Dataset shapes are NCHW-ordered; rotate the input shape for NHWC runs
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());

        // Create and configure function
        FunctionType filter_transform;
        filter_transform.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Add x-padding to stress non-contiguous row strides
        add_padding_x({ &src, &dst }, data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        if(_mixed_layout)
        {
            mix_layout(filter_transform, src, dst);
        }
        else
        {
            // Compute Winograd filter transform function
            filter_transform.run();
        }
        return dst;
    }

    // Computes the reference filter transform for the same seeded data.
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };

        // Fill reference (same seed index and range as the target)
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
    }

    bool            _mixed_layout{ false };
    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000468
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000469template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000470class WinogradOutputTransformValidationFixture : public framework::Fixture
471{
472public:
473 template <typename...>
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100474 void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000475 {
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100476 _target = compute_target(input_shape, winograd_info, data_type, act_info);
477 _reference = compute_reference(input_shape, winograd_info, data_type, act_info);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000478 }
479
480protected:
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000481 void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
482 {
483 const DataLayout data_layout_src = src.info()->data_layout();
484 const DataLayout data_layout_dst = dst.info()->data_layout();
485
486 // Test Multi DataLayout graph cases, when the data layout changes after configure
487 src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
488 dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
489
490 // Compute Convolution function
491 layer.run();
492
493 // Reinstating original data layout for the test suite to properly check the values
494 src.info()->set_data_layout(data_layout_src);
495 dst.info()->set_data_layout(data_layout_dst);
496 }
497
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000498 template <typename U>
499 void fill(U &&tensor, int i, float min, float max)
500 {
501 switch(tensor.data_type())
502 {
Vidhya Sudhan Loganathan71ecf392018-08-31 16:10:16 +0100503 case DataType::F16:
Giorgio Arena6aeb2172020-12-15 15:45:43 +0000504 {
Giorgio Arenaa8e2aeb2021-01-06 11:34:57 +0000505 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
Giorgio Arena6aeb2172020-12-15 15:45:43 +0000506 library->fill(tensor, distribution, i);
507 break;
508 }
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000509 case DataType::F32:
510 {
Giorgio Arena6aeb2172020-12-15 15:45:43 +0000511 std::uniform_real_distribution<float> distribution(min, max);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000512 library->fill(tensor, distribution, i);
513 break;
514 }
515 default:
516 {
517 ARM_COMPUTE_ERROR("Not supported");
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000518 }
519 }
520 }
521
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100522 TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type, ActivationLayerInfo act_info)
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000523 {
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100524 TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
525
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000526 // Create tensors
Giorgio Arenaea55f912018-07-12 15:41:35 +0100527 TensorType src = create_tensor<TensorType>(input_shape, data_type);
528 TensorType bias = create_tensor<TensorType>(output_shape[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)], data_type);
529 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000530
531 // Create and configure function
532 FunctionType output_transform;
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100533 output_transform.configure(&src, &bias, &dst, winograd_info, act_info);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000534
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100535 ARM_COMPUTE_ASSERT(src.info()->is_resizable());
536 ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
537 ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000538
Giorgio Arena63825e82021-03-25 14:54:50 +0000539 add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout);
540
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000541 // Allocate tensors
542 src.allocator()->allocate();
Giorgio Arenaea55f912018-07-12 15:41:35 +0100543 bias.allocator()->allocate();
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000544 dst.allocator()->allocate();
545
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100546 ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
547 ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
548 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000549
550 // Fill tensors
551 fill(AccessorType(src), 0, -1.f, 1.f);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100552 fill(AccessorType(bias), 1, -1.f, 1.f);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000553
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000554 if(_mixed_layout)
555 {
556 mix_layout(output_transform, src, dst);
557 }
558 else
559 {
560 // Compute Winograd output transform function
561 output_transform.run();
562 }
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000563 return dst;
564 }
565
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100566 SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info)
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000567 {
Giorgio Arena3695f9a2018-04-23 17:41:22 +0100568 winograd_info.output_data_layout = DataLayout::NCHW;
569 TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
570
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000571 // Create reference
Gian Marco Iodice247f52c2018-03-22 11:24:56 +0000572 SimpleTensor<T> src{ input_shape, data_type };
Gian Marco Iodice2213d4b2018-04-27 10:39:06 +0100573 SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000574
575 // Fill reference
576 fill(src, 0, -1.f, 1.f);
Giorgio Arenaea55f912018-07-12 15:41:35 +0100577 fill(bias, 1, -1.f, 1.f);
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000578
Manuel Bottini0d0028c2018-10-02 16:41:52 +0100579 const SimpleTensor<T> winograd_output = reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);
580
581 return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000582 }
583
Giorgio Arena63825e82021-03-25 14:54:50 +0000584 bool _mixed_layout{ false };
Gian Marco Iodiced2fab732018-03-02 11:18:12 +0000585 TensorType _target{};
586 SimpleTensor<T> _reference{};
587};
Pablo Tello89519332017-11-17 11:52:36 +0000588} // namespace validation
589} // namespace test
590} // namespace arm_compute
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000591#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */