/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Permute.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationGenericFixture : public framework::Fixture
{
public:
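    // For quantized 8-bit input types the bias is accumulated in 32-bit integers; for floating-point types the bias uses T itself.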
    using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;

    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
    {
        _quantization_info = quantization_info;
        _data_type         = data_type;
        _mixed_layout      = mixed_layout;

        TensorShape         weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
        const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        TensorInfo input_info   = TensorInfo(input_shape, 1, data_type);
        TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);

        const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
    }

    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
        ARM_COMPUTE_UNUSED(dilation);

        _quantization_info = quantization_info;
        _data_type         = data_type;

        const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
    }

protected:
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        DataLayout data_layout = src.info()->data_layout();
        // Test multi data-layout graph cases, where the data layout changes after configure
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Restore the original data layout so the test suite can check the values correctly
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

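    // Fill a tensor with random values drawn from a distribution chosen per data type; 'i' offsets the seed so that src, weights and bias receive different data.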
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<uint32_t> distribution(0, 50);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Use small input range to avoid all the test results being saturated at the end.
                std::uniform_int_distribution<int32_t> distribution(-25, 25);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-5, 5);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

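    // Run the convolution on the backend under test: create, configure and allocate the tensors, fill them and execute the function (through mix_layout() when mixed-layout testing is enabled).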
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);

        add_padding_x({ &src, &bias, &dst }, data_layout);
        add_padding_x({ &weights }, data_layout, input_shape[0] % 4 == 0); // Don't add left padding if cl image will be used

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info, act_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

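    // Compute the expected output with the reference convolution implementation (plus the optional activation) for validation against the target.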
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
        return (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst;
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    QuantizationInfo _quantization_info{};
    DataType         _data_type{};
    bool             _mixed_layout{ false };
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info,
               DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
                                                                                                     act_info, data_layout, mixed_layout);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
               ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
                                                                                                     act_info, data_layout, mixed_layout);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, quantization_info,
                                                                                                     act_info, data_layout);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(),
                                                                                                     act_info, DataLayout::NCHW);
    }
};

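// Illustrative sketch (not part of this header): a backend test suite would typically alias one of the
// fixtures above for a concrete tensor/accessor/function triple and drive it through the framework's
// FIXTURE_DATA_TEST_CASE macro. The dataset and tolerance names below are placeholders, not definitions
// provided by this file.
//
//   template <typename T>
//   using CLDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, data_precommit_f32)
//   {
//       // _target and _reference are populated by the fixture's setup()
//       validate(CLAccessor(_target), _reference, tolerance_f32);
//   }
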
} // namespace validation
} // namespace test
} // namespace arm_compute