/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/DeconvolutionLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

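/** Base fixture for deconvolution layer validation.
 *
 * Runs the function under test (@p FunctionType) on randomly filled tensors and computes the
 * corresponding result with the reference implementation, so that test cases can compare
 * _target against _reference. @p T is the input/output element type and @p TW the weights type;
 * they differ only when per-channel quantized weights are used.
 */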
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class DeconvolutionLayerFixtureBase : public framework::Fixture
{
public:
    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type;

public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
               DataType data_type, DataType weights_data_type, DataLayout data_layout,
               QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, QuantizationInfo weights_quantization_info, bool add_bias)
    {
        _data_type                 = data_type;
        _weights_data_type         = weights_data_type;
        _bias_data_type            = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
        _data_layout               = data_layout;
        _input_quantization_info   = input_quantization_info;
        _output_quantization_info  = output_quantization_info;
        _weights_quantization_info = weights_quantization_info;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, add_bias);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, add_bias);
    }

protected:
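    // Fill a tensor with reproducible random data; the distribution depends on the tensor's
    // data type (quantized types are restricted to the quantized equivalent of [-1, 1]).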
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QSYMM8_PER_CHANNEL:
            {
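                // Widen [min_bound, max_bound] until it covers the symmetric quantized bounds
                // reported for the channel scales, then fill all channels from that single range.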
                int min_bound = 128;
                int max_bound = -127;
                for(size_t i = 0; i < _input_quantization_info.scale().size(); i++)
                {
                    std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                    if(bounds.first < min_bound)
                    {
                        min_bound = bounds.first;
                    }
                    if(bounds.second > max_bound)
                    {
                        max_bound = bounds.second;
                    }
                }
                std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

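    // Fill a tensor with zeros; used for the reference bias when no bias is added so the
    // reference computation still receives a valid (all-zero) bias tensor.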
    template <typename U>
    void fill_zeros(U &&tensor)
    {
        switch(tensor.data_type())
        {
            case DataType::S32:
            {
                library->fill_tensor_value(tensor, 0);
                break;
            }
            case DataType::F16:
                library->fill_tensor_value(tensor, static_cast<half>(0.0f));
                break;
            case DataType::F32:
                library->fill_tensor_value(tensor, static_cast<float>(0.0f));
                break;
            default:
                ARM_COMPUTE_ERROR("Not supported");
        }
    }

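    // Run the function under test: create and allocate the tensors, configure the function,
    // fill the inputs and return the computed output tensor.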
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape bias_shape, TensorShape output_shape,
                              const PadStrideInfo &info, bool add_bias)
    {
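        // Shapes are expressed in NCHW order; permute them when the test runs in NHWC layout.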
        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _input_quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _input_quantization_info, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _output_quantization_info, _data_layout);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, add_bias ? &bias : nullptr, &dst, info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        }
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        if(add_bias)
        {
            bias.allocator()->allocate();
        }
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        }
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        if(add_bias)
        {
            fill(AccessorType(bias), 2);
        }

        // Compute DeconvolutionLayer function
        conv.run();
        return dst;
    }

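    // Compute the expected output with the reference implementation on identically filled tensors.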
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
                                      const PadStrideInfo &info, bool add_bias)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, _data_type, 1, _input_quantization_info };
        SimpleTensor<TW>    weights{ weights_shape, _weights_data_type, 1, _weights_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _input_quantization_info };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);

        if(add_bias)
        {
            fill(bias, 2);
        }
        else
        {
            fill_zeros(bias);
        }
        return reference::deconvolution_layer<T, TW>(src, weights, bias, output_shape, info, _output_quantization_info);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    DataType         _data_type{};
    DataType         _weights_data_type{};
    DataType         _bias_data_type{};
    DataLayout       _data_layout{};
    QuantizationInfo _input_quantization_info{};
    QuantizationInfo _output_quantization_info{};
    QuantizationInfo _weights_quantization_info{};
};

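/** Fixture for non-quantized deconvolution with the kernel size fixed at compile time and symmetric padding. */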
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
               unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
    {
        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
        auto                out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
        TensorInfo          input_info(input_shape, 1, data_type);
        TensorInfo          weights_info(weights_shape, 1, data_type);
        TensorShape         output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(),
                                                                                           QuantizationInfo(), QuantizationInfo(), add_bias);
    }
};

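/** Fixture for non-quantized deconvolution with asymmetric padding (independent left/right/top/bottom pads). */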
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int pad_left, unsigned int pad_right, unsigned int pad_top,
               unsigned int pad_bottom, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
    {
        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(sx, sy, pad_left, pad_right, pad_top, pad_bottom, DimensionRoundingType::CEIL);
        auto                out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
        TensorInfo          input_info(input_shape, 1, data_type);
        TensorInfo          weights_info(weights_shape, 1, data_type);
        TensorShape         output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(),
                                                                                           QuantizationInfo(), QuantizationInfo(), add_bias);
    }
};

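/** Fixture for asymmetric-quantized deconvolution (QASYMM8/QASYMM8_SIGNED) where the weights share
 *  the input quantization info.
 */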
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
               unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias)
    {
        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
        auto                out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
        TensorInfo          input_info(input_shape, 1, data_type, input_quantization_info);
        TensorInfo          weights_info(weights_shape, 1, data_type, input_quantization_info);
        TensorShape         output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout,
                                                                                           input_quantization_info,
                                                                                           output_quantization_info, input_quantization_info, add_bias);
    }
};

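/** Fixture for quantized deconvolution with QSYMM8_PER_CHANNEL weights: a random per-output-channel
 *  scale is generated for the weights' quantization info.
 */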
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW, unsigned int kernel_size_x, unsigned int kernel_size_y>
class DeconvolutionValidationQuantizedPerChannelFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
               unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias,
               DataType weights_data_type)
    {
        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
        auto                out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
        TensorInfo          input_info(input_shape, 1, data_type, input_quantization_info);
        TensorInfo          weights_info(weights_shape, 1, weights_data_type, input_quantization_info);
        TensorShape         output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);

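        // Draw one random quantization scale per output channel (output_shape[2] == num_kernels).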
        std::vector<float>                    weights_scales{};
        std::mt19937                          gen(library->seed());
        std::uniform_real_distribution<float> dis(0.01f, 1.f);
        for(size_t i = 0; i < output_shape[2]; ++i)
        {
            weights_scales.push_back(dis(gen));
        }
        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, weights_data_type, data_layout,
                                                                                            input_quantization_info,
                                                                                            output_quantization_info, QuantizationInfo(weights_scales), add_bias);
    }
};

} // namespace validation
} // namespace test
} // namespace arm_compute