/*
 * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H
#define ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/Winograd.h"
#include "utils/Utils.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

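/** Winograd convolution validation fixture (fast math path).
 *
 * Runs the full convolution function under test with fast math enabled and
 * compares it against a reference built from the individual Winograd stages:
 * input transform -> filter transform -> batched GEMM -> output transform,
 * followed by the optional activation. T1 allows the reference to run at a
 * different precision than the target (e.g. an F32 reference for an F16
 * target), converting via copy_tensor at the boundaries.
 */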
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
    {
        ARM_COMPUTE_UNUSED(dilation);
        _mixed_layout = mixed_layout;
        _target       = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
        _reference    = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout = src.info()->data_layout();
        // Test multi-DataLayout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute convolution function
        layer.run();

        // Reinstate the original data layout so the test suite can properly check the values
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

        // Create and configure function
        FunctionType conv;
        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
                           framework::LogLevel::ERRORS);
        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &weights, &bias, &dst }, data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0, -0.5f, 0.5f);
        fill(AccessorType(weights), 1, -0.5f, 0.5f);
        fill(AccessorType(bias), 2, -0.5f, 0.5f);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute function
            conv.run();
        }
        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const PadStrideInfo &info,
                                      DataType data_type, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src_t{ input_shape, data_type, 1 };
        SimpleTensor<T> weights_t{ weights_shape, data_type, 1 };
        SimpleTensor<T> bias_t{ bias_shape, data_type, 1 };

        // Fill reference
        fill(src_t, 0, -0.5f, 0.5f);
        SimpleTensor<T1> src_t1(copy_tensor<T1, T>(src_t));

        fill(weights_t, 1, -0.5f, 0.5f);
        SimpleTensor<T1> weights_t1(copy_tensor<T1, T>(weights_t));
        if(use_bias)
        {
            fill(bias_t, 2, -0.5f, 0.5f);
        }
        else
        {
            fill(bias_t, 2, 0.f, 0.f);
        }
        SimpleTensor<T1> bias_t1(copy_tensor<T1, T>(bias_t));

        // Set output tile
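        // A 4x4 tile is the default (e.g. F(4x4, 3x3)); 1D 7-tap kernels use a
        // 2-element tile along the filtered dimension, and other 1xN / Nx1
        // kernels collapse the tile to a single row or column.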
        Size2D output_tile(4U, 4U);
        if(weights_shape[0] == 7 && weights_shape[1] == 1)
        {
            output_tile.width  = 2;
            output_tile.height = 1;
        }
        else if(weights_shape[0] == 1 && weights_shape[1] == 7)
        {
            output_tile.width  = 1;
            output_tile.height = 2;
        }
        else if(weights_shape[0] == 1)
        {
            output_tile.width = 1;
        }
        else if(weights_shape[1] == 1)
        {
            output_tile.height = 1;
        }

        WinogradInfo winograd_info(output_tile,
                                   Size2D(weights_shape[0], weights_shape[1]),
                                   Size2D(input_shape[0], input_shape[1]),
                                   info,
                                   src_t1.data_layout());

        // Compute tensor shapes for input, filter and output transforms
        TensorShape input_transform_shape  = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        TensorShape filter_transform_shape = compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, data_type), winograd_info);
        TensorShape batched_gemm_shape     = input_transform_shape;
        batched_gemm_shape[0]              = filter_transform_shape[0];
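        // The batched GEMM output keeps the input-transform layout but has one
        // column per transformed filter, i.e. per output feature map.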
        TensorShape output_transform_shape = compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, data_type), winograd_info);

        // Dummy matrix C to perform matrix multiplication
        SimpleTensor<T1> dummy_c{ batched_gemm_shape, data_type, 1 };

        // Compute Winograd-based convolution
        SimpleTensor<T1> input_transform_out = reference::winograd_input_transform<T1>(src_t1, input_transform_shape, winograd_info);

        SimpleTensor<T1> filter_transform_out = reference::winograd_filter_transform<T1>(weights_t1, filter_transform_shape, winograd_info);
        SimpleTensor<T1> batched_gemm         = reference::gemm<T1>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
        SimpleTensor<T1> conv_out             = reference::winograd_output_transform<T1>(batched_gemm, bias_t1, output_transform_shape, winograd_info);
        SimpleTensor<T>  conv_out_t(copy_tensor<T, T1>(conv_out));
        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out_t, act_info) : conv_out_t;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    bool            _mixed_layout{ false };
};
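
// Illustrative use from a backend test suite (a sketch only; the concrete
// tensor, accessor and function types, dataset and tolerance depend on the
// backend under test):
//
//   template <typename T>
//   using CLWinogradConvolutionLayerFastMathFixture =
//       WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture<float>,
//                          framework::DatasetMode::PRECOMMIT, /* dataset of shapes, pad/stride, activation and layout */)
//   {
//       // _target and _reference are populated by the fixture's setup()
//       validate(CLAccessor(_target), _reference, tolerance_f32);
//   }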
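// The three fixtures below validate the individual Winograd stages (input,
// filter and output transforms) in isolation against the corresponding
// reference implementations.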
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
        _mixed_layout            = mixed_layout;
        _target                  = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference               = compute_reference(input_shape, output_shape, winograd_info, data_type);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout_src = src.info()->data_layout();
        const DataLayout data_layout_dst = dst.info()->data_layout();

        // Test multi-DataLayout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Winograd input transform function
        layer.run();

        // Reinstate the original data layouts so the test suite can properly check the values
        src.info()->set_data_layout(data_layout_src);
        dst.info()->set_data_layout(data_layout_dst);
    }

    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
        }

        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());

        // Create and configure function
        FunctionType transf;
        transf.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &dst }, data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        if(_mixed_layout)
        {
            mix_layout(transf, src, dst);
        }
        else
        {
            // Compute Winograd input transform function
            transf.run();
        }
        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
    }

    bool            _mixed_layout{ false };
    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

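// For the filter transform only the output tile and the kernel size matter;
// the remaining WinogradInfo fields are constructed with placeholder values.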
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
    void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
    {
        WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
        TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        _mixed_layout = mixed_layout;
        _target       = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
        _reference    = compute_reference(input_shape, output_shape, winograd_info, data_type);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout_src = src.info()->data_layout();
        const DataLayout data_layout_dst = dst.info()->data_layout();

        // Test multi-DataLayout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Winograd filter transform function
        layer.run();

        // Reinstate the original data layouts so the test suite can properly check the values
        src.info()->set_data_layout(data_layout_src);
        dst.info()->set_data_layout(data_layout_dst);
    }

    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    TensorType compute_target(TensorShape input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());

        // Create and configure function
        FunctionType filter_transform;
        filter_transform.configure(&src, &dst, winograd_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &dst }, data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        if(_mixed_layout)
        {
            mix_layout(filter_transform, src, dst);
        }
        else
        {
            // Compute Winograd filter transform function
            filter_transform.run();
        }
        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
    }

    bool            _mixed_layout{ false };
    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

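// The output transform fixture also exercises the fused activation; its
// reference always computes in NCHW, while the target writes to the layout
// requested by winograd_info.output_data_layout.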
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
    {
        _mixed_layout = mixed_layout;
        _target       = compute_target(input_shape, winograd_info, data_type, act_info);
        _reference    = compute_reference(input_shape, winograd_info, data_type, act_info);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout_src = src.info()->data_layout();
        const DataLayout data_layout_dst = dst.info()->data_layout();

        // Test multi-DataLayout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Winograd output transform function
        layer.run();

        // Reinstate the original data layouts so the test suite can properly check the values
        src.info()->set_data_layout(data_layout_src);
        dst.info()->set_data_layout(data_layout_dst);
    }

    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const WinogradInfo &winograd_info, DataType data_type, ActivationLayerInfo act_info)
    {
        TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create tensors
        TensorType src  = create_tensor<TensorType>(input_shape, data_type);
        TensorType bias = create_tensor<TensorType>(output_shape[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)], data_type);
        TensorType dst  = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);

        // Create and configure function
        FunctionType output_transform;
        output_transform.configure(&src, &bias, &dst, winograd_info, act_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(bias), 1, -1.f, 1.f);

        if(_mixed_layout)
        {
            mix_layout(output_transform, src, dst);
        }
        else
        {
            // Compute Winograd output transform function
            output_transform.run();
        }
        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info)
    {
        winograd_info.output_data_layout = DataLayout::NCHW;
        TensorShape output_shape         = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

        // Create reference
        SimpleTensor<T> src{ input_shape, data_type };
        SimpleTensor<T> bias{ TensorShape(input_shape[0]), data_type };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(bias, 1, -1.f, 1.f);

        const SimpleTensor<T> winograd_output = reference::winograd_output_transform<T>(src, bias, output_shape, winograd_info);

        return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
    }

    bool            _mixed_layout{ false };
    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif // ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H