blob: 8562955b79925488312c695ca01e0e4f7d055bd2 [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Jonathan Deakin464ed202023-01-12 11:41:14 +00002 * Copyright (c) 2017-2023 Arm Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
Gunes Bayircc171f92021-09-13 13:38:29 +010029#include "arm_compute/graph/Utils.h"
Viet-Hoa Doa4ff9d02023-02-13 13:28:01 +000030#ifdef ARM_COMPUTE_OPENCL_ENABLED
31#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
32#endif // ARM_COMPUTE_OPENCL_ENABLED
Moritz Pflanzerbeabe3b2017-08-31 14:56:32 +010033#include "arm_compute/runtime/NEON/NEScheduler.h"
Francesco Petrogalli553f6952022-06-30 10:22:01 +000034#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
Gunes Bayircc171f92021-09-13 13:38:29 +010035#include "src/graph/mutators/MutatorUtils.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010036#include "tests/AssetsLibrary.h"
37#include "tests/Globals.h"
38#include "tests/IAccessor.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010039#include "tests/framework/Asserts.h"
40#include "tests/framework/Fixture.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010041#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000042#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000043#include "tests/validation/reference/ConvolutionLayer.h"
Gunes Bayircc171f92021-09-13 13:38:29 +010044#include "tests/validation/reference/PadLayer.h"
Michalis Spyroue2503892018-04-23 15:17:31 +010045#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000046#include "tests/validation/reference/Utils.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010047
48#include <random>
Viet-Hoa Doa4ff9d02023-02-13 13:28:01 +000049#include <type_traits>
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010050
51namespace arm_compute
52{
53namespace test
54{
55namespace validation
56{
namespace detail
{
/** Configure a convolution function for the validation run.
 *
 * Generic overload, used for every convolution function except
 * CLGEMMConvolutionLayer (when OpenCL is enabled, SFINAE disables this
 * overload for that type; when OpenCL is disabled the plain `void` return
 * is used and this is the only overload).
 *
 * @param[in,out] func         Convolution function to configure.
 * @param[in]     src          Source tensor.
 * @param[in]     weights      Weights tensor.
 * @param[in]     bias         Bias tensor.
 * @param[out]    dst          Destination tensor.
 * @param[in]     info         Padding and stride information.
 * @param[in]     weights_info Weights metadata (reshape info etc.).
 * @param[in]     dilation     Kernel dilation.
 * @param[in]     act_info     Fused activation information.
 * @param[in]     num_groups   Number of convolution groups.
 */
template <typename ConvolutionFunction, typename TensorType>
#ifdef ARM_COMPUTE_OPENCL_ENABLED
std::enable_if_t<!std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
#else  // ARM_COMPUTE_OPENCL_ENABLED
void
#endif // ARM_COMPUTE_OPENCL_ENABLED
configure_conv_function(ConvolutionFunction &func,
                        TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
                        const PadStrideInfo &info, const WeightsInfo &weights_info,
                        const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    // This overload passes an explicit enable_fast_math argument, which the
    // CL GEMM convolution configure() below does not take.
    func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, false /* enable_fast_math */, num_groups);
}

#ifdef ARM_COMPUTE_OPENCL_ENABLED
/** Configure overload selected only for CLGEMMConvolutionLayer, whose
 * configure() signature has no enable_fast_math parameter.
 */
template <typename ConvolutionFunction, typename TensorType>
std::enable_if_t<std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void>
configure_conv_function(ConvolutionFunction &func,
                        TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
                        const PadStrideInfo &info, const WeightsInfo &weights_info,
                        const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, num_groups);
}
#endif // ARM_COMPUTE_OPENCL_ENABLED
} // namespace detail
85
/** Generic convolution-layer validation fixture.
 *
 * Runs the convolution function under test (@p FunctionType via @p TensorType /
 * @p AccessorType) and the scalar reference implementation on identically
 * filled tensors, storing the results in _target and _reference for the test
 * suite to compare.
 *
 * @tparam T  Input/output element type.
 * @tparam TW Weights element type (may differ, e.g. per-channel quantized).
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
    // Quantized (u8/s8) inputs accumulate into 32-bit bias; all other types
    // use the input type for the bias.
    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
                  || std::is_same<typename std::decay<T>::type, int8_t>::value,
                  int32_t, T >::type;

public:
    /** Set up the test case: record the configuration, then compute both the
     * target (accelerated) and reference outputs.
     *
     * @param[in] pre_pad_layer Optional explicit padding (NCHW order) applied
     *                          before the convolution, used to exercise
     *                          pad-fusion (see compute_target()).
     */
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
               bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}))
    {
        _mixed_layout             = mixed_layout;
        _data_type                = data_type;
        _weights_data_type        = weights_data_type;
        _is_quantized             = is_data_type_quantized_asymmetric(data_type);
        _is_bfloat16              = data_type == DataType::BFLOAT16;
        // Quantized runs use S32 bias; bfloat16 runs produce/accumulate in F32.
        _bias_data_type           = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
        _output_data_type         = _is_bfloat16 ? DataType::F32 : data_type;
        _quantization_info        = quantization_info;
        _weight_quantization_info = weight_quantization_info;
        _data_layout              = data_layout;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer);
    }

protected:
    /** Run the layer with the data layout flipped after configuration.
     *
     * Exercises the multi-data-layout graph case: configure with one layout,
     * run with the other, then restore the original layout so validation
     * reads the tensors correctly.
     */
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(_data_layout);
        dst.info()->set_data_layout(_data_layout);
    }

    /** Round-trip an F32 buffer through bfloat16 in place.
     *
     * Matches the precision loss the bfloat16 target path incurs, so the
     * F32 reference produces comparable values.
     *
     * @param[in,out] values Pointer to @p size floats, modified in place.
     */
    void regularize_values(void *values, size_t size)
    {
        float *fvalues = static_cast<float *>(values);
        for(size_t i = 0; i < size; ++i)
        {
            fvalues[i] = float(bfloat16(fvalues[i]));
        }
    }

    /** Fill @p tensor with data-type-appropriate random values using seed @p i. */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Restrict values to the quantized range corresponding to [-1, 1].
                std::pair<int, int>                     bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                std::pair<int, int>                    bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QSYMM8_PER_CHANNEL:
            {
                // Sentinels start outside the valid s8 range so the first
                // per-channel bounds always replace them.
                int min_bound = 128;
                int max_bound = -127;
                // NOTE: this loop variable shadows the seed parameter `i`.
                for(size_t i = 0; i < _weight_quantization_info.scale().size(); i++)
                {
                    std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
                    if(bounds.first < min_bound)
                    {
                        min_bound = bounds.first;
                    }
                    if(bounds.second > max_bound)
                    {
                        max_bound = bounds.second;
                    }
                }
                // Use the widest bounds across all channels.
                std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::BFLOAT16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    // given input is IN nchw format
    /** Configure, allocate, fill and run the convolution function under test.
     *
     * Shapes arrive in NCHW order and are permuted here when the fixture's
     * layout is NHWC. When @p pre_pad_layer covers only H/W, the padding is
     * folded into the convolution's PadStrideInfo, mirroring the graph's
     * pad-with-convolution fusion.
     *
     * @return The computed destination tensor.
     */
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
    {
        // Input channels must be divisible by weight channels (grouped conv).
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));

            if(pre_pad_layer.size() > 0)
            {
                // make sure paddings exist for each c,h,w dimensions
                // NOTE(review): assumes pre_pad_layer.size() <= 3 here;
                // a larger list would make `3 - size()` wrap (unsigned).
                for(unsigned int i = 0; i < 3 - pre_pad_layer.size(); ++i)
                {
                    pre_pad_layer.push_back({ 0, 0 });
                }

                // rotate padding info from nchw to nhwc
                std::rotate(pre_pad_layer.begin(), pre_pad_layer.begin() + 2, pre_pad_layer.begin() + 3);
            }
        }

        const int idx_width  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

        WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
        TensorShape reshaped_weights_shape(weights_shape);

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout);

        // Create and configure function
        FunctionType conv;

        const unsigned int height_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::HEIGHT);
        const unsigned int width_index  = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::WIDTH);

        const PaddingInfo pad_w = width_index < pre_pad_layer.size() ? pre_pad_layer[width_index] : PaddingInfo(0, 0);
        const PaddingInfo pad_h = height_index < pre_pad_layer.size() ? pre_pad_layer[height_index] : PaddingInfo(0, 0);

        if(pre_pad_layer.size() > 0 && arm_compute::graph::is_padding_in_height_or_width(_data_layout, pre_pad_layer))
        {
            // this is the logic implemented in NodeFusionMutator -> fuse_pad_with_convolution
            const PadStrideInfo new_conv_info(
                info.stride().first,
                info.stride().second,
                info.pad_left() + pad_w.first,
                info.pad_right() + pad_w.second,
                info.pad_top() + pad_h.first,
                info.pad_bottom() + pad_h.second,
                info.round());
            detail::configure_conv_function(conv, &src, &weights, &bias, &dst, new_conv_info, weights_info, dilation, act_info, num_groups);
        }
        else
        {
            detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
        }

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

    /** Compute the scalar reference result for the same configuration.
     *
     * For bfloat16 runs the reference operates in F32, with inputs
     * round-tripped through bfloat16 (regularize_values) to match the
     * target's precision.
     */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        // Setup reference data types
        const DataType src_dt     = _is_bfloat16 ? DataType::F32 : _data_type;
        const DataType weights_dt = _is_bfloat16 ? DataType::F32 : _weights_data_type;
        const DataType bias_dt    = _is_bfloat16 ? DataType::F32 : _bias_data_type;

        // Create reference
        SimpleTensor<T>     src{ input_shape, src_dt, 1, _quantization_info };
        SimpleTensor<TW>    weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };

        // Same fill seeds as compute_target() so both paths see identical data.
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        // Fill with bfloat16 to perform the conversion and reduce the mismatches in the output
        if(_is_bfloat16)
        {
            regularize_values(static_cast<void *>(src.data()), src.num_elements());
            regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
        }

        if(pre_pad_layer.size() > 0)
        {
            src = reference::pad_layer<T>(src, pre_pad_layer, PixelValue(0), PaddingMode::CONSTANT);
        }

        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups),
                                                                     act_info) :
               reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
    }

    TensorType       _target{};                   /**< Output of the function under test. */
    SimpleTensor<T>  _reference{};                /**< Output of the reference implementation. */
    DataType         _data_type{};                /**< Input data type. */
    DataType         _weights_data_type{};        /**< Weights data type. */
    DataType         _bias_data_type{};           /**< Bias data type (derived in setup()). */
    DataType         _output_data_type{};         /**< Output data type (derived in setup()). */
    DataLayout       _data_layout{};              /**< Data layout of all tensors. */
    QuantizationInfo _quantization_info{};        /**< Input/output quantization info. */
    QuantizationInfo _weight_quantization_info{}; /**< Weights quantization info. */
    bool             _is_quantized = false;       /**< True for asymmetric-quantized input types. */
    bool             _is_bfloat16  = false;       /**< True when input type is BFLOAT16. */
    bool             _mixed_layout = false;       /**< Run with flipped layout after configure. */
};
359
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000360template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100361class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100362{
363public:
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000364 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Michalis Spyroue2503892018-04-23 15:17:31 +0100365 DataLayout data_layout, ActivationLayerInfo act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100366 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100367 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
368 data_type, data_type, data_layout,
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000369 QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout);
Chunosov5124be52017-11-22 20:42:13 +0700370 }
371};
372
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000373template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Gunes Bayircc171f92021-09-13 13:38:29 +0100374class ConvolutionValidationWithPaddingFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
375{
376public:
Gunes Bayircc171f92021-09-13 13:38:29 +0100377 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
378 DataLayout data_layout, ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
379 {
380 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
381 data_type, data_type, data_layout,
382 QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout, pre_pad_layer);
383 }
384};
385
386template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100387class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Chunosov5124be52017-11-22 20:42:13 +0700388{
389public:
Alex Gilday7da29b62018-03-23 14:16:00 +0000390 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Georgios Pinitas19ea4192018-06-19 13:09:53 +0100391 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
Chunosov5124be52017-11-22 20:42:13 +0700392 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100393 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000394 data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100395 }
396};
397
398template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
399class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
400{
401public:
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100402 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
403 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
404 {
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000405 std::vector<float> weights_scales{};
406 std::mt19937 gen(library->seed());
407 std::uniform_real_distribution<float> dis(0.01f, 1.f);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100408 for(size_t i = 0; i < output_shape[2]; ++i)
409 {
410 weights_scales.push_back(dis(gen));
411 }
412 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
413 reshape_weights, data_type, weights_data_type, data_layout,
414 quantization_info, QuantizationInfo(weights_scales), act_info);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100415 }
416};
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000417
418#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
Ramy Elgammal91780022022-07-20 14:57:37 +0100419inline TensorInfo prepare_weights(const TensorInfo tensor_info, const arm_compute::WeightFormat weight_format)
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000420{
421 const DataLayout data_layout = tensor_info.data_layout();
422 ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
423 const DataType data_type = tensor_info.data_type();
424 const TensorShape tensor_shape = tensor_info.tensor_shape();
425 const int N = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
426 const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
427 const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
428 const int C = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
429
Ramy Elgammal91780022022-07-20 14:57:37 +0100430 const int interleave_by = arm_compute::interleave_by(weight_format);
431 const int block_by = arm_compute::block_by(weight_format);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000432 const int Ip = arm_gemm::roundup<unsigned int>(C, block_by); // C'=I'
433 const int Op = arm_gemm::roundup<unsigned int>(N, interleave_by); // O'=N'
434
Jonathan Deakin464ed202023-01-12 11:41:14 +0000435 arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes();
436 strides_in_bytes.set(1, Ip * interleave_by * H * W * tensor_info.element_size());
437 strides_in_bytes.set(2, Ip * Op * tensor_info.element_size());
438
439 const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes();
440
441 // Total size needs to include padded dimensions
442 const size_t total_size_in_bytes = Op * H * W * Ip * tensor_info.element_size();
443
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000444 const TensorShape TS(Ip, W, H, Op);
Jonathan Deakin464ed202023-01-12 11:41:14 +0000445
446 TensorInfo new_tensor_info = tensor_info;
447 new_tensor_info.init(TS, 1 /*num_channels, deprecated*/, data_type, strides_in_bytes,
448 offset_first_element_in_bytes, total_size_in_bytes);
449 return new_tensor_info;
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000450}
451
452template <typename ScalarType, typename AccessorType>
Ramy Elgammal91780022022-07-20 14:57:37 +0100453inline void rearrange_data(const AccessorType src, AccessorType dst, const arm_compute::WeightFormat weight_format)
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000454{
Ramy Elgammal91780022022-07-20 14:57:37 +0100455 ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(weight_format), framework::LogLevel::ERRORS);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000456 // Data Layout: OHWIo<interleave_by>i<block_by>
Ramy Elgammal91780022022-07-20 14:57:37 +0100457 const int interleave_by = arm_compute::interleave_by(weight_format);
458 const int block_by = arm_compute::block_by(weight_format);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000459 const TensorShape src_tensor_shape = src.shape();
460 const DataLayout data_layout = src.data_layout();
461 ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS);
462 const unsigned int O = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
463 const unsigned int H = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
464 const unsigned int W = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
465 const unsigned int I = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
466 const unsigned int Ip = arm_gemm::roundup<unsigned int>(I, block_by); // C'=I'
467 const unsigned int Op = arm_gemm::roundup<unsigned int>(O, interleave_by); // N'=O'
468
469 ARM_COMPUTE_EXPECT_EQUAL(Op * H * W * Ip, (unsigned)dst.num_elements(), framework::LogLevel::ERRORS);
470 ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS);
471
472 const ScalarType *src_ptr = reinterpret_cast<const ScalarType *>(src.data());
473 ScalarType *dst_ptr = reinterpret_cast<ScalarType *>(dst.data());
474 for(unsigned i = 0; i < I; ++i)
475 for(unsigned w = 0; w < W; ++w)
476 for(unsigned h = 0; h < H; ++h)
477 for(unsigned o = 0; o < O; ++o)
478 {
479 ScalarType src_element;
480 switch(data_layout)
481 {
482 case DataLayout::NHWC:
483 {
484 src_element = src_ptr[o * H * W * I + h * W * I + w * I + i];
485 }
486 break;
487 default:
488 {
489 ARM_COMPUTE_ERROR("Unsupported memory layout.");
490 }
491 }
492 const int x5 = std::floor(((float)o) / interleave_by);
493 const int x4 = h;
494 const int x3 = w;
495 const int x2 = std::floor((float)i / block_by);
496 const int x1 = o % interleave_by;
497 const int x0 = i % block_by;
498 unsigned dst_idx = x5 * H * W * Ip * interleave_by
499 + x4 * W * Ip * interleave_by
500 + x3 * Ip * interleave_by
501 + x2 * interleave_by * block_by
502 + x1 * block_by
503 + x0;
504 dst_ptr[dst_idx] = src_element;
505 }
506}
507
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100508template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000509class VariableWeightsFixtureBaseClass : public framework::Fixture
510{
511public:
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000512 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataLayout data_layout,
513 const DataType data_type)
514 {
515 conv = std::make_unique<ConvolutionFunction>();
516 // prepare data
517 _data_layout = data_layout;
518 // Fixed format kernels for variable weights can work only with NHWC format.
519 ARM_COMPUTE_EXPECT_EQUAL(_data_layout, DataLayout::NHWC, framework::LogLevel::ERRORS);
520 _data_type = data_type;
521 // run the code
522 compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
523 compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation);
524 }
525 void teardown()
526 {
527 _target.allocator()->free();
528 }
529
530protected:
531 template <typename U>
532 void fill(U &&tensor, int i)
533 {
534 switch(tensor.data_type())
535 {
536 case DataType::F16:
537 {
538 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
539 library->fill(tensor, distribution, i);
540 break;
541 }
542 case DataType::F32:
543 {
544 std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
545 library->fill(tensor, distribution, i);
546 break;
547 }
548 default:
549 library->fill_tensor_uniform(tensor, i);
550 }
551 }
552
553private:
554 virtual void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
555 const PadStrideInfo &conv_info,
556 const Size2D &dilation) = 0;
557
558 void compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &conv_info,
559 const Size2D &dilation)
560 {
561 // The dataset is always in NCHW format - we need to make C the
562 // innermost dimension because the fixed-format kernel work only
563 // with NHWC layout.
564 permute(input_shape, PermutationVector(2U, 0U, 1U));
565 permute(weights_shape, PermutationVector(2U, 0U, 1U));
566 permute(output_shape, PermutationVector(2U, 0U, 1U));
567 const auto src_tensor_info = TensorInfo(input_shape, 1, _data_type, _data_layout);
568 const auto weight_tensor_info = TensorInfo(weights_shape, 1, _data_type, _data_layout);
569 const auto bias_tensor_info = TensorInfo(bias_shape, 1, _data_type, _data_layout);
570 auto dst_tensor_info = TensorInfo(output_shape, 1, _data_type, _data_layout);
571
572 const int kernel_height = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT)];
573 const int kernel_width = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH)];
574 const int num_kernels = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES)];
575
Ramy Elgammal91780022022-07-20 14:57:37 +0100576 const WeightsInfo query_weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, arm_compute::WeightFormat::ANY);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000577 const bool kernel_found = bool(ConvolutionFunction::has_opt_impl(_computed_weight_format, &src_tensor_info, &weight_tensor_info,
578 &bias_tensor_info, &dst_tensor_info, conv_info, query_weights_info));
579 // Make surethat the setup founds a fixed-format kernel as requested by the test case.
580 ARM_COMPUTE_EXPECT(kernel_found, framework::LogLevel::ERRORS);
Ramy Elgammal91780022022-07-20 14:57:37 +0100581 ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(_computed_weight_format), framework::LogLevel::ERRORS);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000582
583 const WeightsInfo weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, _computed_weight_format);
584 configure_and_execute_kernel(src_tensor_info, weight_tensor_info, bias_tensor_info, dst_tensor_info, weights_info, conv_info,
585 dilation);
586 }
587 void compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
588 const Size2D &dilation)
589 {
590 ARM_COMPUTE_UNUSED(input_shape, weights_shape, bias_shape, output_shape, info,
591 dilation);
592
593 // Create reference
594 SimpleTensor<ScalarType> src{ input_shape, _data_type };
595 SimpleTensor<ScalarType> weights{ weights_shape, _data_type };
596 SimpleTensor<ScalarType> bias{ bias_shape, _data_type };
597 fill(src, 0);
598 fill(bias, 1);
599 fill(weights, 3);
600 _reference = reference::convolution_layer<ScalarType>(src, weights, bias, output_shape, info, dilation, 1 /*num_groups*/);
601 }
602 DataLayout _data_layout{};
603 DataType _data_type{};
604
605protected:
606 std::unique_ptr<ConvolutionFunction> conv{};
Ramy Elgammal91780022022-07-20 14:57:37 +0100607 arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000608 TensorClass _target{};
609 SimpleTensor<ScalarType> _reference{};
610};
611
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100612template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
613class VariableWeightsFixture : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000614{
615 void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
616 const PadStrideInfo &conv_info,
617 const Size2D &dilation)
618 {
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100619 this->conv->configure(&src_tensor_info, &weight_tensor_info, &bias_tensor_info, &dst_tensor_info, conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000620
621 // Allocate input tensors
622 auto src = create_tensor<TensorClass>(src_tensor_info);
623 auto weights_original = create_tensor<TensorClass>(weight_tensor_info);
624 const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format);
625 auto weights_transformed = create_tensor<TensorClass>(new_tensor_info);
626 auto bias = create_tensor<TensorClass>(bias_tensor_info);
627 src.allocator()->allocate();
628 weights_original.allocator()->allocate();
629 weights_transformed.allocator()->allocate();
630 bias.allocator()->allocate();
631 // Allocate destination tensor
632 this->_target = create_tensor<TensorClass>(dst_tensor_info);
633 this->_target.allocator()->allocate();
634
635 // Prepare source and biases that are left unchanged.
636 this->fill(AccessorType(src), 0);
637 this->fill(AccessorType(bias), 1);
638
639 // First run
640 this->fill(AccessorType(weights_original), 2);
641 rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
642 ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weights_transformed }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &(this->_target) } };
643 this->conv->run(run_pack);
644 // Second run, with new weights
645 this->fill(AccessorType(weights_original), 3);
646 rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
647 this->conv->run(run_pack);
648 src.allocator()->free();
649 weights_original.allocator()->free();
650 weights_transformed.allocator()->free();
651 bias.allocator()->free();
652 }
653};
654
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100655template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math>
656class VariableWeightsFixtureNEInterface : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math>
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000657{
658 void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info,
659 const PadStrideInfo &conv_info,
660 const Size2D &dilation)
661 {
662 // Allocate input tensors
663 auto src = create_tensor<TensorClass>(src_tensor_info);
664 auto weights_original = create_tensor<TensorClass>(weight_tensor_info);
665 const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format);
666 auto weights_transformed = create_tensor<TensorClass>(new_tensor_info);
667 auto bias = create_tensor<TensorClass>(bias_tensor_info);
668 src.allocator()->allocate();
669 weights_original.allocator()->allocate();
670 weights_transformed.allocator()->allocate();
671 bias.allocator()->allocate();
672 // Allocate destination tensor
673 this->_target = create_tensor<TensorClass>(dst_tensor_info);
674 this->_target.allocator()->allocate();
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100675 this->conv->configure(&src, &weights_transformed, &bias, &(this->_target), conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000676 // Prepare source and biases that are left unchanged.
677 this->fill(AccessorType(src), 0);
678 this->fill(AccessorType(bias), 1);
679
680 // First run
681 this->fill(AccessorType(weights_original), 2);
682 rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
683 this->conv->run();
684 // Second run, with new weights
685 this->fill(AccessorType(weights_original), 3);
686 rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format);
687 this->conv->run();
688 src.allocator()->free();
689 weights_original.allocator()->free();
690 weights_transformed.allocator()->free();
691 bias.allocator()->free();
692 }
693};
694
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100695template <typename ConvolutionClass, bool enable_fast_math>
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000696class HasOptImplFixture : public framework::Fixture
697{
698public:
Ramy Elgammal91780022022-07-20 14:57:37 +0100699 void setup(DataType data_type, arm_compute::WeightFormat query_weight_format)
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000700 {
701 auto conv = std::make_unique<ConvolutionClass>();
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100702 const auto src_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
703 const auto weight_info = TensorInfo(TensorShape(64, 3U, 3U, 64U), 1, enable_fast_math ? DataType::BFLOAT16 : data_type, DataLayout::NHWC);
704 const auto bias_info = TensorInfo(TensorShape(64U), 1, data_type, DataLayout::NHWC);
705 auto dst_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
706 const auto conv_info = PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR);
707 const WeightsInfo weights_info(false, 3U, 3U, 64U, false, query_weight_format);
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000708 _kernel_found = bool(ConvolutionClass::has_opt_impl(_computed_weight_format, &src_info, &weight_info,
Pablo Marquez Tello93581a52022-07-21 13:55:27 +0100709 &bias_info, &dst_info, conv_info, weights_info,
710 /*dilation*/ Size2D(1U, 1U), /*act_info*/ ActivationLayerInfo(), enable_fast_math));
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000711 }
712
713protected:
Ramy Elgammal91780022022-07-20 14:57:37 +0100714 bool _kernel_found{ false };
715 arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED };
Francesco Petrogalli553f6952022-06-30 10:22:01 +0000716};
717#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
718
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100719} // namespace validation
720} // namespace test
721} // namespace arm_compute
722#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */