/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H
#define ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/reference/ActivationLayer.h" // for reference::activation_layer used below
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Permute.h"

#include <limits> // for std::numeric_limits used in setup_quantization
#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationGenericFixture : public framework::Fixture
{
public:
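    // Quantized (u)int8 convolutions accumulate into 32-bit integers, so the
    // bias type is int32_t for uint8_t/int8_t tensors and T otherwise.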
    using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;

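    // Choose reproducible, randomized quantization info for the input and
    // weights (scales in [2^-5, 2^3], offsets spanning the full range of T),
    // then derive a destination quantization and a bias range that keep the
    // results representable.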
    void setup_quantization(const TensorShape &input_shape, const TensorShape &weights_shape, QuantizationInfo &input_q_info,
                            QuantizationInfo &weights_q_info, DataType data_type)
    {
        const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
        const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());

        std::mt19937                           generator(library->seed() + _hash);
        std::uniform_real_distribution<float>  distribution_float(-5.0f, 3.0f);
        std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);

        const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
        const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]

        const int32_t offset_lhs = distribution_t(generator);
        const int32_t offset_rhs = distribution_t(generator);

        input_q_info   = QuantizationInfo(scale_lhs, offset_lhs);
        weights_q_info = QuantizationInfo(scale_rhs, offset_rhs);

        QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
                                                                   weights_shape.y() /* height */, weights_shape.x() /* width */, input_shape.z() /* channels */,
                                                                   data_type, 0.5f /* bias_fraction */);

        _dst_q_info = q_hint.q_info;
        _min_bias   = q_hint.bias_min;
        _max_bias   = q_hint.bias_max;

        // Do not change these limits: they are the natural limits of the associated
        // data types and are embedded in the computation of the dst quantization info.
        _min_u8 = 0;
        _max_u8 = 255;
        _min_s8 = -128;
        _max_s8 = 127;
    }

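    // setup() overload taking raw convolution parameters; the weights shape is
    // derived from the kernel size and number of kernels, and the output shape
    // is computed from the convolution geometry.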
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
    {
        // This hash seeds the random generators. Hash collisions may occur, but
        // this is intentional: it is an easy way to give each test configuration
        // a different random sequence, where previously many configurations
        // shared the same set of values.
        _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
                stride_x + stride_y + pad_x + pad_y + kernel_size + num_kernels + mixed_layout
                + (data_layout == DataLayout::NHWC);

        _data_type    = data_type;
        _mixed_layout = mixed_layout;

        TensorShape         weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
        const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        TensorInfo input_info   = TensorInfo(input_shape, 1, data_type);
        TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);

        const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);

        QuantizationInfo input_q_info   = quantization_info;
        QuantizationInfo weights_q_info = quantization_info;
        _dst_q_info                     = quantization_info;

        if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
        {
            setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type);
        }

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info);
    }

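    // setup() overload taking explicit tensor shapes. The dilation argument is
    // accepted for dataset compatibility but is otherwise unused; the reference
    // is computed with a 1x1 dilation.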
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
        ARM_COMPUTE_UNUSED(dilation);

        // This hash seeds the random generators. Hash collisions may occur, but
        // this is intentional: it is an easy way to give each test configuration
        // a different random sequence, where previously many configurations
        // shared the same set of values.
        _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] +
                weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] + dilation.x() +
                dilation.y() + info.pad_bottom() + info.pad_left() + info.pad_right() + info.pad_top();

        _data_type = data_type;

        const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        QuantizationInfo input_q_info   = quantization_info;
        QuantizationInfo weights_q_info = quantization_info;
        _dst_q_info                     = quantization_info;

        if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
        {
            setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type);
        }

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout = src.info()->data_layout();

        // Test multi-DataLayout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstate the original data layout so the test suite can properly check the values
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

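    // Fill a tensor with reproducible random values; the range depends on the
    // data type and, for quantized types, on the limits chosen in setup_quantization().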
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Use a small default range to avoid all the test results saturating;
                // setup_quantization() widens these limits when dynamic quantization is used.
                std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

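    // Run the convolution on the target backend: permute the shapes for NHWC if
    // requested, create and configure the function, allocate and fill the
    // tensors, then execute (optionally with mixed layouts) and return dst.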
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, input_q_info, data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, weights_q_info, data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, QuantizationInfo());
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, _dst_q_info, data_layout);

        add_padding_x({ &src, &bias, &dst }, data_layout);
        add_padding_x({ &weights }, data_layout, input_shape[0] % 4 == 0); // Don't add left padding if the CL image will be used

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info, act_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0 + _hash);
        fill(AccessorType(weights), 1 + _hash);
        fill(AccessorType(bias), 2 + _hash);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

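    // Compute the expected output with the scalar reference implementation,
    // using the same fill seeds as compute_target() so both paths see
    // identical input data.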
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, data_type, 1, input_q_info };
        SimpleTensor<T>     weights{ weights_shape, data_type, 1, weights_q_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, QuantizationInfo() };

        // Fill reference
        fill(src, 0 + _hash);
        fill(weights, 1 + _hash);
        fill(bias, 2 + _hash);

        SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info,
                                                              Size2D(1U, 1U) /* dilation */, 1 /* num_groups */, _dst_q_info);
        return act_info.enabled() ? reference::activation_layer<T>(dst, act_info) : dst;
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    QuantizationInfo _dst_q_info{};
    DataType         _data_type{};
    bool             _mixed_layout{ false };
    int32_t          _hash{ 0 };

    // Random initialization limits.
    // The default values are previously handcrafted limits that should be used
    // when dynamic quantization is not in effect.
    int32_t _min_bias{ -5 };
    int32_t _max_bias{ 5 };
    int32_t _min_u8{ 0 };
    int32_t _max_u8{ 50 };
    int32_t _min_s8{ -25 };
    int32_t _max_s8{ 25 };
};

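// Validation fixture for non-quantized data types; uses a default-constructed QuantizationInfo.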
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info,
               DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
                                                                                                    act_info, data_layout, mixed_layout);
    }
};

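// Validation fixture for quantized data types, with a caller-supplied QuantizationInfo.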
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
               ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
                                                                                                    act_info, data_layout, mixed_layout);
    }
};

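// Quantized validation driven by explicit tensor shapes.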
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, quantization_info,
                                                                                                    act_info, data_layout);
    }
};

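// Non-quantized validation driven by explicit tensor shapes; always runs in NCHW.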
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(),
                                                                                                    act_info, DataLayout::NCHW);
    }
};

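// Illustrative usage (a sketch, not part of this header: the concrete tensor,
// accessor and function types come from the backend test suites, e.g.
// tests/validation/NEON/DirectConvolutionLayer.cpp, and the dataset and
// tolerance names below are placeholders):
//
//   template <typename T>
//   using NEDirectConvolutionLayerFixture =
//       DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<float>,
//                          framework::DatasetMode::PRECOMMIT, data_precommit)
//   {
//       // Compare the backend output against the scalar reference
//       validate(Accessor(_target), _reference, tolerance_fp32);
//   }
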
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif // ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H