blob: 0622e5e6f0f2a64b01168dc0def7f2f35b961a39 [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Jonathan Deakin464ed202023-01-12 11:41:14 +00002 * Copyright (c) 2017-2023 Arm Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Gunes Bayir93a77cd2023-10-13 16:58:41 +010024
25#ifndef ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
26#define ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010027
28#include "arm_compute/core/TensorShape.h"
29#include "arm_compute/core/Types.h"
Gunes Bayircc171f92021-09-13 13:38:29 +010030#include "arm_compute/graph/Utils.h"
Viet-Hoa Doa4ff9d02023-02-13 13:28:01 +000031#ifdef ARM_COMPUTE_OPENCL_ENABLED
32#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
33#endif // ARM_COMPUTE_OPENCL_ENABLED
Moritz Pflanzerbeabe3b2017-08-31 14:56:32 +010034#include "arm_compute/runtime/NEON/NEScheduler.h"
Francesco Petrogalli553f6952022-06-30 10:22:01 +000035#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
Gunes Bayircc171f92021-09-13 13:38:29 +010036#include "src/graph/mutators/MutatorUtils.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010037#include "tests/AssetsLibrary.h"
38#include "tests/Globals.h"
39#include "tests/IAccessor.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010040#include "tests/framework/Asserts.h"
41#include "tests/framework/Fixture.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010042#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000043#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000044#include "tests/validation/reference/ConvolutionLayer.h"
Gunes Bayircc171f92021-09-13 13:38:29 +010045#include "tests/validation/reference/PadLayer.h"
Michalis Spyroue2503892018-04-23 15:17:31 +010046#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000047#include "tests/validation/reference/Utils.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010048
49#include <random>
Viet-Hoa Doa4ff9d02023-02-13 13:28:01 +000050#include <type_traits>
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010051
52namespace arm_compute
53{
54namespace test
55{
56namespace validation
57{
namespace detail
{
/** Configure a convolution function for the validation tests.
 *
 * Two overloads are provided because, as visible below, the CLGEMMConvolutionLayer
 * overload calls configure() WITHOUT the enable_fast_math argument, while every
 * other function type is configured with enable_fast_math = false.
 * When OpenCL is enabled, SFINAE on the return type (std::enable_if_t) selects
 * this overload for every ConvolutionFunction EXCEPT CLGEMMConvolutionLayer;
 * without OpenCL this is the only overload and returns plain void.
 */
template <typename ConvolutionFunction, typename TensorType>
#ifdef ARM_COMPUTE_OPENCL_ENABLED
std::enable_if_t<!std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
#else // ARM_COMPUTE_OPENCL_ENABLED
void
#endif // ARM_COMPUTE_OPENCL_ENABLED
configure_conv_function(ConvolutionFunction &func,
                        TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
                        const PadStrideInfo &info, const WeightsInfo &weights_info,
                        const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, false /* enable_fast_math */, num_groups);
}

#ifdef ARM_COMPUTE_OPENCL_ENABLED
/** Overload selected only when ConvolutionFunction is CLGEMMConvolutionLayer:
 * its configure() call is made without an enable_fast_math argument. */
template <typename ConvolutionFunction, typename TensorType>
std::enable_if_t<std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
configure_conv_function(ConvolutionFunction &func,
                        TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
                        const PadStrideInfo &info, const WeightsInfo &weights_info,
                        const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, num_groups);
}
#endif // ARM_COMPUTE_OPENCL_ENABLED
} // namespace detail
86
/** Generic convolution-layer validation fixture.
 *
 * Computes the convolution both with the backend function under test
 * (compute_target) and with the scalar reference implementation
 * (compute_reference), storing the results in _target / _reference for the
 * test suite to compare.
 *
 * Template parameters:
 *  - TensorType / AccessorType / FunctionType: backend tensor, accessor and
 *    convolution function types.
 *  - T:  element type of input/output; TW: element type of the weights
 *    (differs from T for per-channel quantized weights).
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
    // Bias element type: quantized 8-bit inputs (uint8_t/int8_t) use int32_t
    // accumulator biases, everything else uses T itself.
    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
                  || std::is_same<typename std::decay<T>::type, int8_t>::value,
                  int32_t, T >::type;

    /** Randomize input/weights quantization and derive a matching output
     * quantization plus a safe bias range.
     *
     * @param[in]     input_shape    Input tensor shape (channels read from .z()).
     * @param[in]     weights_shape  Weights shape (kernel width/height read from .x()/.y()).
     * @param[in,out] input_q_info   Input quantization info. NOTE: the caller (setup())
     *                               passes _quantization_info here, so the assignment to
     *                               _quantization_info below updates it before it is
     *                               forwarded to suggest_conv_dst_q_info_and_bias().
     * @param[in,out] weights_q_info Weights quantization info; aliases _weight_quantization_info
     *                               the same way.
     * @param[in]     data_type      Input data type.
     */
    void setup_quantization(TensorShape input_shape, TensorShape weights_shape, QuantizationInfo &input_q_info,
                            QuantizationInfo &weights_q_info, DataType data_type)
    {
        // Offsets may take any representable value of T.
        const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
        const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());

        // Seed with the per-configuration hash so different test configurations
        // draw different (but reproducible) quantization parameters.
        std::mt19937                           generator(library->seed() + _hash);
        std::uniform_real_distribution<float>  distribution_float(-5.0f, 3.0f);
        std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);

        // Log-uniform scales so both very small and moderate scales are exercised.
        const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
        const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]

        const int32_t offset_lhs = distribution_t(generator);
        const int32_t offset_rhs = distribution_t(generator);

        _quantization_info        = QuantizationInfo(scale_lhs, offset_lhs);
        _weight_quantization_info = QuantizationInfo(scale_rhs, offset_rhs);

        // Ask the helper for an output quantization that keeps the accumulator
        // representable, plus min/max bounds for the random bias values.
        QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
                                                                   weights_shape.y() /* heights */, weights_shape.x() /* width */, input_shape.z() /* channels */,
                                                                   data_type, 0.5f /* bias_fraction */);

        _dst_q_info = q_hint.q_info;
        _min_bias   = q_hint.bias_min;
        _max_bias   = q_hint.bias_max;
    }

public:
    /** Entry point called by the test framework: records the configuration,
     * optionally randomizes quantization, then computes target and reference. */
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
               bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false)
    {
        // This hash is used by random generators. There may be hash collisions but
        // this is intentional as it's a very easy way to make the the current
        // random generation process almost different for many test configurations,
        // which were using the same set of values before.
        _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
                + weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] +
                mixed_layout + (data_type == DataType::QASYMM8_SIGNED) + (data_layout == DataLayout::NHWC);

        _mixed_layout             = mixed_layout;
        _data_type                = data_type;
        _weights_data_type        = weights_data_type;
        const bool is_quantized   = is_data_type_quantized(weights_data_type);
        _is_bfloat16              = data_type == DataType::BFLOAT16;
        // Quantized runs accumulate into S32 biases; BFLOAT16 runs use F32 biases/outputs.
        _bias_data_type           = is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
        _output_data_type         = _is_bfloat16 ? DataType::F32 : data_type;
        _quantization_info        = quantization_info;
        _weight_quantization_info = weight_quantization_info;
        _data_layout              = data_layout;
        _dst_q_info               = quantization_info;

        // Dynamic output quantization is only used for asymmetric (non per-channel
        // symmetric) quantized types and only when there is no fused activation
        // (other than identity), since activations change the output range.
        if(is_quantized && !is_data_type_quantized_symmetric(weights_data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
        {
            setup_quantization(input_shape, weights_shape, _quantization_info, _weight_quantization_info, data_type);
            _use_dynamic_output_quant = true;
        }

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer, padded_weights);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer);
    }

protected:
    /** Run the layer with src/dst data layouts flipped after configure(), then
     * restore them so validation reads the tensors in the original layout. */
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(_data_layout);
        dst.info()->set_data_layout(_data_layout);
    }

    /** Round-trip an array of floats through bfloat16 in place, so the reference
     * sees the same precision loss as the bfloat16 target path. */
    void regularize_values(void *values, size_t size)
    {
        float *fvalues = static_cast<float *>(values);
        for(size_t i = 0; i < size; ++i)
        {
            fvalues[i] = float(bfloat16(fvalues[i]));
        }
    }

    /** Fill @p tensor with random values appropriate for its data type.
     *
     * @param[in,out] tensor Tensor (accessor or SimpleTensor) to fill.
     * @param[in]     i      Seed offset; target and reference use the same
     *                       offsets (plus _hash) so both see identical data.
     */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                if(_use_dynamic_output_quant)
                {
                    // Full uint8 range: the dst quantization was derived to absorb it.
                    std::uniform_int_distribution<int32_t> distribution(0, 255);
                    library->fill(tensor, distribution, i);
                }
                else
                {
                    // Legacy initialization in case the output quantization info can't be reliably estimated
                    std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                    std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
                    library->fill(tensor, distribution, i);
                }
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                if(_use_dynamic_output_quant)
                {
                    // Full int8 range, analogous to the QASYMM8 case above.
                    std::uniform_int_distribution<int32_t> distribution(-128, 127);
                    library->fill(tensor, distribution, i);
                }
                else
                {
                    // Legacy initialization in case the output quantization info can't be reliably estimated
                    std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                    std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
                    library->fill(tensor, distribution, i);
                }
                break;
            }
            case DataType::QSYMM8_PER_CHANNEL:
            {
                // Take the widest [min, max] interval across all per-channel scales.
                int min_bound = 128;
                int max_bound = -127;
                // NOTE(review): this loop variable shadows the seed parameter `i`;
                // the fill below (after the loop) uses the outer `i` as intended.
                for(size_t i = 0; i < _weight_quantization_info.scale().size(); i++)
                {
                    std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
                    if(bounds.first < min_bound)
                    {
                        min_bound = bounds.first;
                    }
                    if(bounds.second > max_bound)
                    {
                        max_bound = bounds.second;
                    }
                }
                std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                // Bias range: either the defaults or the range suggested by setup_quantization().
                std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::BFLOAT16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    // given input is IN nchw format
    /** Build, configure, fill and run the backend convolution; returns the dst tensor.
     *
     * Shapes are taken by value because they are permuted in place for NHWC.
     * pre_pad_layer, when non-empty, is either fused into the convolution's
     * PadStrideInfo (mirroring NodeFusionMutator) or applied by the reference.
     */
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false)
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        // Grouped convolution: input channels must be a multiple of weight channels.
        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));

            if(pre_pad_layer.size() > 0)
            {
                // make sure paddings exist for each c,h,w dimensions
                for(unsigned int i = 0; i < 3 - pre_pad_layer.size(); ++i)
                {
                    pre_pad_layer.push_back({ 0, 0 });
                }

                // rotate padding info from nchw to nhwc
                std::rotate(pre_pad_layer.begin(), pre_pad_layer.begin() + 2, pre_pad_layer.begin() + 3);
            }
        }

        const int idx_width  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

        WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
        TensorShape reshaped_weights_shape(weights_shape);

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, QuantizationInfo() /*bias is not a quantized type*/, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _output_data_type, 1, _dst_q_info, _data_layout);

        // Create and configure function
        FunctionType conv;

        const unsigned int height_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::HEIGHT);
        const unsigned int width_index  = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::WIDTH);

        const PaddingInfo pad_w = width_index < pre_pad_layer.size() ? pre_pad_layer[width_index] : PaddingInfo(0, 0);
        const PaddingInfo pad_h = height_index < pre_pad_layer.size() ? pre_pad_layer[height_index] : PaddingInfo(0, 0);

        if(pre_pad_layer.size() > 0 && arm_compute::graph::is_padding_in_height_or_width(_data_layout, pre_pad_layer))
        {
            // this is the logic implemented in NodeFusionMutator -> fuse_pad_with_convolution
            const PadStrideInfo new_conv_info(
                info.stride().first,
                info.stride().second,
                info.pad_left() + pad_w.first,
                info.pad_right() + pad_w.second,
                info.pad_top() + pad_h.first,
                info.pad_bottom() + pad_h.second,
                info.round());
            detail::configure_conv_function(conv, &src, &weights, &bias, &dst, new_conv_info, weights_info, dilation, act_info, num_groups);
        }
        else
        {
            detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
        }

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
        // Test "add padding after configure" behavior. This behavior should not affect the correctness
        add_padding_x({ &src, &bias, &dst }, _data_layout);
        // Padding weights may affect code path in some backends
        if (padded_weights)
        {
            add_padding_x({ &weights }, _data_layout);
        }

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors (same seed offsets as compute_reference so data matches)
        fill(AccessorType(src), 0 + _hash);
        fill(AccessorType(weights), 1 + _hash);
        fill(AccessorType(bias), 2 + _hash);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

    /** Compute the scalar reference result for the same configuration.
     *
     * For BFLOAT16 runs, F32 reference tensors are used and the data is
     * round-tripped through bfloat16 so target and reference see the same
     * values.
     */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        // Setup reference data types
        const DataType src_dt     = _is_bfloat16 ? DataType::F32 : _data_type;
        const DataType weights_dt = _is_bfloat16 ? DataType::F32 : _weights_data_type;
        const DataType bias_dt    = _is_bfloat16 ? DataType::F32 : _bias_data_type;

        // Create reference
        SimpleTensor<T>     src{ input_shape, src_dt, 1, _quantization_info };
        SimpleTensor<TW>    weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };

        // Same seed offsets as compute_target
        fill(src, 0 + _hash);
        fill(weights, 1 + _hash);
        fill(bias, 2 + _hash);

        // Fill with bfloat16 to perform the conversion and reduce the mismatches in the output
        if(_is_bfloat16)
        {
            regularize_values(static_cast<void *>(src.data()), src.num_elements());
            regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
        }

        // Apply the pre-padding explicitly here; the target fuses it into the conv.
        if(pre_pad_layer.size() > 0)
        {
            src = reference::pad_layer<T>(src, pre_pad_layer, PixelValue(0), PaddingMode::CONSTANT);
        }

        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info),
                                                                     act_info) :
               reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info);
    }

    TensorType       _target{};                        // Output of the function under test
    SimpleTensor<T>  _reference{};                     // Output of the reference implementation
    DataType         _data_type{};
    DataType         _weights_data_type{};
    DataType         _bias_data_type{};
    DataType         _output_data_type{};
    DataLayout       _data_layout{};
    QuantizationInfo _quantization_info{};             // Input quantization
    QuantizationInfo _weight_quantization_info{};
    QuantizationInfo _dst_q_info{};                    // Output quantization (possibly derived)
    bool             _is_bfloat16 = false;
    bool             _mixed_layout = false;
    bool             _use_dynamic_output_quant{false}; // True when setup_quantization() ran
    int32_t          _hash{0};                         // Per-configuration seed offset
    int32_t          _min_bias{-100};                  // Bias fill range (overridden for dynamic quant)
    int32_t          _max_bias{100};
};
430
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000431template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100432class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100433{
434public:
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000435 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Michalis Spyroue2503892018-04-23 15:17:31 +0100436 DataLayout data_layout, ActivationLayerInfo act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100437 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100438 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
439 data_type, data_type, data_layout,
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000440 QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout);
Chunosov5124be52017-11-22 20:42:13 +0700441 }
442};
443
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000444template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
SiCong Lic5ab4df2023-10-17 17:38:57 +0100445class ConvolutionValidationPaddedWeightsFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
446{
447public:
448 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
449 DataLayout data_layout)
450 {
451 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
452 data_type, data_type, data_layout,
453 QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), mixed_layout, PaddingList({}), true);
454 }
455};
456
457template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Gunes Bayircc171f92021-09-13 13:38:29 +0100458class ConvolutionValidationWithPaddingFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
459{
460public:
Gunes Bayircc171f92021-09-13 13:38:29 +0100461 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
462 DataLayout data_layout, ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
463 {
464 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
465 data_type, data_type, data_layout,
466 QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout, pre_pad_layer);
467 }
468};
469
470template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100471class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Chunosov5124be52017-11-22 20:42:13 +0700472{
473public:
Alex Gilday7da29b62018-03-23 14:16:00 +0000474 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Georgios Pinitas19ea4192018-06-19 13:09:53 +0100475 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
Chunosov5124be52017-11-22 20:42:13 +0700476 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100477 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000478 data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100479 }
480};
481
482template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
483class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
484{
485public:
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100486 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
487 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
488 {
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000489 std::vector<float> weights_scales{};
490 std::mt19937 gen(library->seed());
491 std::uniform_real_distribution<float> dis(0.01f, 1.f);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100492 for(size_t i = 0; i < output_shape[2]; ++i)
493 {
494 weights_scales.push_back(dis(gen));
495 }
496 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
497 reshape_weights, data_type, weights_data_type, data_layout,
498 quantization_info, QuantizationInfo(weights_scales), act_info);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100499 }
500};
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000501
SiCong Lic5ab4df2023-10-17 17:38:57 +0100502
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000503#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
Ramy Elgammal91780022022-07-20 14:57:37 +0100504inline TensorInfo prepare_weights(const TensorInfo tensor_info, const arm_compute::WeightFormat weight_format)
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000505{
506 const DataLayout data_layout = tensor_info.data_layout();
507 ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
508 const DataType data_type = tensor_info.data_type();
509 const TensorShape tensor_shape = tensor_info.tensor_shape();
510 const int N = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
511 const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
512 const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
513 const int C = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
514
Ramy Elgammal91780022022-07-20 14:57:37 +0100515 const int interleave_by = arm_compute::interleave_by(weight_format);
516 const int block_by = arm_compute::block_by(weight_format);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000517 const int Ip = arm_gemm::roundup<unsigned int>(C, block_by); // C'=I'
518 const int Op = arm_gemm::roundup<unsigned int>(N, interleave_by); // O'=N'
519
Jonathan Deakin464ed202023-01-12 11:41:14 +0000520 arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes();
521 strides_in_bytes.set(1, Ip * interleave_by * H * W * tensor_info.element_size());
522 strides_in_bytes.set(2, Ip * Op * tensor_info.element_size());
523
524 const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes();
525
526 // Total size needs to include padded dimensions
527 const size_t total_size_in_bytes = Op * H * W * Ip * tensor_info.element_size();
528
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000529 const TensorShape TS(Ip, W, H, Op);
Jonathan Deakin464ed202023-01-12 11:41:14 +0000530
531 TensorInfo new_tensor_info = tensor_info;
532 new_tensor_info.init(TS, 1 /*num_channels, deprecated*/, data_type, strides_in_bytes,
533 offset_first_element_in_bytes, total_size_in_bytes);
534 return new_tensor_info;
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000535}
536
537template <typename ScalarType, typename AccessorType>
Ramy Elgammal91780022022-07-20 14:57:37 +0100538inline void rearrange_data(const AccessorType src, AccessorType dst, const arm_compute::WeightFormat weight_format)
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000539{
Ramy Elgammal91780022022-07-20 14:57:37 +0100540 ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(weight_format), framework::LogLevel::ERRORS);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000541 // Data Layout: OHWIo<interleave_by>i<block_by>
Ramy Elgammal91780022022-07-20 14:57:37 +0100542 const int interleave_by = arm_compute::interleave_by(weight_format);
543 const int block_by = arm_compute::block_by(weight_format);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000544 const TensorShape src_tensor_shape = src.shape();
545 const DataLayout data_layout = src.data_layout();
546 ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
547 const unsigned int O = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
548 const unsigned int H = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
549 const unsigned int W = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
550 const unsigned int I = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
551 const unsigned int Ip = arm_gemm::roundup<unsigned int>(I, block_by); // C'=I'
552 const unsigned int Op = arm_gemm::roundup<unsigned int>(O, interleave_by); // N'=O'
553
554 ARM_COMPUTE_EXPECT_EQUAL(Op * H * W * Ip, (unsigned)dst.num_elements(), framework::LogLevel::ERRORS);
555 ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS);
556
557 const ScalarType *src_ptr = reinterpret_cast<const ScalarType *>(src.data());
558 ScalarType *dst_ptr = reinterpret_cast<ScalarType *>(dst.data());
559 for(unsigned i = 0; i < I; ++i)
560 for(unsigned w = 0; w < W; ++w)
561 for(unsigned h = 0; h < H; ++h)
562 for(unsigned o = 0; o < O; ++o)
563 {
564 ScalarType src_element;
565 switch(data_layout)
566 {
567 case DataLayout::NHWC:
568 {
569 src_element = src_ptr[o * H * W * I + h * W * I + w * I + i];
570 }
571 break;
572 default:
573 {
574 ARM_COMPUTE_ERROR("Unsupported memory layout.");
575 }
576 }
577 const int x5 = std::floor(((float)o) / interleave_by);
578 const int x4 = h;
579 const int x3 = w;
580 const int x2 = std::floor((float)i / block_by);
581 const int x1 = o % interleave_by;
582 const int x0 = i % block_by;
583 unsigned dst_idx = x5 * H * W * Ip * interleave_by
584 + x4 * W * Ip * interleave_by
585 + x3 * Ip * interleave_by
586 + x2 * interleave_by * block_by
587 + x1 * block_by
588 + x0;
589 dst_ptr[dst_idx] = src_element;
590 }
591}
592
/** Base fixture for testing convolutions with variable (per-run) weights in a
 * fixed weight format.
 *
 * setup() asks the convolution function (via has_opt_impl with
 * WeightFormat::ANY) for a fixed-format kernel, records the format the backend
 * selected in _computed_weight_format, executes the target through the
 * subclass hook configure_and_execute_kernel(), and computes a plain reference
 * convolution for validation.
 */
template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
class VariableWeightsFixtureBaseClass : public framework::Fixture
{
public:
    /** Run both target and reference.
     *
     * Shapes arrive from the dataset in NCHW order; compute_target() permutes
     * them to NHWC, the only layout fixed-format kernels support.
     */
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataLayout data_layout,
               const DataType data_type)
    {
        conv = std::make_unique<ConvolutionFunction>();
        // prepare data
        _data_layout = data_layout;
        // Fixed format kernels for variable weights can work only with NHWC format.
        ARM_COMPUTE_EXPECT_EQUAL(_data_layout, DataLayout::NHWC, framework::LogLevel::ERRORS);
        _data_type = data_type;
        // run the code
        compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
        compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
    }
    /** Release the destination tensor allocated by the subclass hook. */
    void teardown()
    {
        _target.allocator()->free();
    }

protected:
    /** Fill @p tensor with uniform values in [-1, 1] (seed @p i keeps fills reproducible). */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                // Fallback for any other data type the library can fill generically.
                library->fill_tensor_uniform(tensor, i);
        }
    }

private:
    /** Subclass hook: configure the convolution, allocate tensors and run it twice
     * (with two different weight values) using the computed fixed weight format. */
    virtual void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
                                              const PadStrideInfo &conv_info,
                                              const Size2D &dilation) = 0;

    /** Query the fixed-format kernel, then delegate execution to the subclass hook. */
    void compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &conv_info,
                        const Size2D &dilation)
    {
        // The dataset is always in NCHW format - we need to make C the
        // innermost dimension because the fixed-format kernel work only
        // with NHWC layout.
        permute(input_shape, PermutationVector(2U, 0U, 1U));
        permute(weights_shape, PermutationVector(2U, 0U, 1U));
        permute(output_shape, PermutationVector(2U, 0U, 1U));
        const auto src_tensor_info    = TensorInfo(input_shape, 1, _data_type, _data_layout);
        const auto weight_tensor_info = TensorInfo(weights_shape, 1, _data_type, _data_layout);
        const auto bias_tensor_info   = TensorInfo(bias_shape, 1, _data_type, _data_layout);
        auto dst_tensor_info          = TensorInfo(output_shape, 1, _data_type, _data_layout);

        const int kernel_height = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT)];
        const int kernel_width  = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH)];
        const int num_kernels   = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES)];

        // Query with WeightFormat::ANY so the backend picks the best fixed format.
        const WeightsInfo query_weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, arm_compute::WeightFormat::ANY);
        const bool kernel_found = bool(ConvolutionFunction::has_opt_impl(_computed_weight_format, &src_tensor_info, &weight_tensor_info,
                                                                         &bias_tensor_info, &dst_tensor_info, conv_info, query_weights_info));
        // Make sure that the setup finds a fixed-format kernel as requested by the test case.
        ARM_COMPUTE_EXPECT(kernel_found, framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(_computed_weight_format), framework::LogLevel::ERRORS);

        const WeightsInfo weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, _computed_weight_format);
        configure_and_execute_kernel(src_tensor_info, weight_tensor_info, bias_tensor_info, dst_tensor_info, weights_info, conv_info,
                                     dilation);
    }
    /** Compute the reference convolution. Weights use fill seed 3, matching the
     * weights of the target's second run, which is the one validated. */
    void compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                           const Size2D &dilation)
    {
        ARM_COMPUTE_UNUSED(input_shape, weights_shape, bias_shape, output_shape, info,
                           dilation);

        // Create reference
        SimpleTensor<ScalarType> src{ input_shape, _data_type };
        SimpleTensor<ScalarType> weights{ weights_shape, _data_type };
        SimpleTensor<ScalarType> bias{ bias_shape, _data_type };
        fill(src, 0);
        fill(bias, 1);
        fill(weights, 3);
        _reference = reference::convolution_layer<ScalarType>(src, weights, bias, output_shape, info, dilation, 1 /*num_groups*/);
    }
    DataLayout _data_layout{}; // always NHWC for these tests
    DataType   _data_type{};

protected:
    std::unique_ptr<ConvolutionFunction> conv{};
    // Fixed weight format chosen by the backend during compute_target().
    arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
    TensorClass               _target{};
    SimpleTensor<ScalarType>  _reference{};
};
696
/** Fixture exercising variable weights through the stateless ITensorPack
 * run interface: the function is configured with TensorInfo only, and the
 * actual tensors (including host-rearranged fixed-format weights) are passed
 * in a tensor pack at run time — twice, with different weight values.
 */
template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
class VariableWeightsFixture : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
{
    void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
                                      const PadStrideInfo &conv_info,
                                      const Size2D &dilation)
    {
        // Configure from TensorInfo only; tensors are supplied later via the pack.
        this->conv->configure(&src_tensor_info, &weight_tensor_info, &bias_tensor_info, &dst_tensor_info, conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);

        // Allocate input tensors
        auto src              = create_tensor<TensorClass>(src_tensor_info);
        auto weights_original = create_tensor<TensorClass>(weight_tensor_info);
        // Weights are rearranged on the host into the fixed format chosen during setup.
        const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format);
        auto weights_transformed         = create_tensor<TensorClass>(new_tensor_info);
        auto bias                        = create_tensor<TensorClass>(bias_tensor_info);
        src.allocator()->allocate();
        weights_original.allocator()->allocate();
        weights_transformed.allocator()->allocate();
        bias.allocator()->allocate();
        // Allocate destination tensor
        this->_target = create_tensor<TensorClass>(dst_tensor_info);
        this->_target.allocator()->allocate();

        // Prepare source and biases that are left unchanged.
        this->fill(AccessorType(src), 0);
        this->fill(AccessorType(bias), 1);

        // First run
        this->fill(AccessorType(weights_original), 2);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weights_transformed }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &(this->_target) } };
        this->conv->run(run_pack);
        // Second run, with new weights
        this->fill(AccessorType(weights_original), 3);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        this->conv->run(run_pack);
        // _target is freed in teardown(); everything else is released here.
        src.allocator()->free();
        weights_original.allocator()->free();
        weights_transformed.allocator()->free();
        bias.allocator()->free();
    }
};
739
/** Fixture exercising variable weights through the classic Neon function
 * interface: tensors are allocated first, configure() binds them, and run()
 * takes no arguments — executed twice with different weight values.
 */
template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
class VariableWeightsFixtureNEInterface : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
{
    void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
                                      const PadStrideInfo &conv_info,
                                      const Size2D &dilation)
    {
        // Allocate input tensors
        auto src              = create_tensor<TensorClass>(src_tensor_info);
        auto weights_original = create_tensor<TensorClass>(weight_tensor_info);
        // Weights are rearranged on the host into the fixed format chosen during setup.
        const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format);
        auto weights_transformed         = create_tensor<TensorClass>(new_tensor_info);
        auto bias                        = create_tensor<TensorClass>(bias_tensor_info);
        src.allocator()->allocate();
        weights_original.allocator()->allocate();
        weights_transformed.allocator()->allocate();
        bias.allocator()->allocate();
        // Allocate destination tensor
        this->_target = create_tensor<TensorClass>(dst_tensor_info);
        this->_target.allocator()->allocate();
        // Configure binds the already-allocated tensors (contrast with the
        // ITensorPack fixture, which configures from TensorInfo alone).
        this->conv->configure(&src, &weights_transformed, &bias, &(this->_target), conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);
        // Prepare source and biases that are left unchanged.
        this->fill(AccessorType(src), 0);
        this->fill(AccessorType(bias), 1);

        // First run
        this->fill(AccessorType(weights_original), 2);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        this->conv->run();
        // Second run, with new weights
        this->fill(AccessorType(weights_original), 3);
        rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
        this->conv->run();
        // _target is freed in teardown(); everything else is released here.
        src.allocator()->free();
        weights_original.allocator()->free();
        weights_transformed.allocator()->free();
        bias.allocator()->free();
    }
};
779
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100780template <typename ConvolutionClass, bool enable_fast_math>
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000781class HasOptImplFixture : public framework::Fixture
782{
783public:
Ramy Elgammal91780022022-07-20 14:57:37 +0100784 void setup(DataType data_type, arm_compute::WeightFormat query_weight_format)
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000785 {
786 auto conv = std::make_unique<ConvolutionClass>();
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100787 const auto src_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
788 const auto weight_info = TensorInfo(TensorShape(64, 3U, 3U, 64U), 1, enable_fast_math ? DataType::BFLOAT16 : data_type, DataLayout::NHWC);
789 const auto bias_info = TensorInfo(TensorShape(64U), 1, data_type, DataLayout::NHWC);
790 auto dst_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
791 const auto conv_info = PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR);
792 const WeightsInfo weights_info(false, 3U, 3U, 64U, false, query_weight_format);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000793 _kernel_found = bool(ConvolutionClass::has_opt_impl(_computed_weight_format, &src_info, &weight_info,
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100794 &bias_info, &dst_info, conv_info, weights_info,
Gunes Bayir93a77cd2023-10-13 16:58:41 +0100795 Size2D(1U, 1U) /*dilation*/, ActivationLayerInfo() /*act_info*/, enable_fast_math));
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000796 }
797
798protected:
Ramy Elgammal91780022022-07-20 14:57:37 +0100799 bool _kernel_found{ false };
800 arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000801};
802#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
803
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100804} // namespace validation
805} // namespace test
806} // namespace arm_compute
Gunes Bayir93a77cd2023-10-13 16:58:41 +0100807
808#endif // ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H