blob: 07790e84d98d4dc108161e01a0e63d4af26d006f [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Giorgio Arenab309fc22021-01-05 09:46:16 +00002 * Copyright (c) 2017-2021 Arm Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
Moritz Pflanzerbeabe3b2017-08-31 14:56:32 +010029#include "arm_compute/runtime/NEON/NEScheduler.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010030#include "tests/AssetsLibrary.h"
31#include "tests/Globals.h"
32#include "tests/IAccessor.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010033#include "tests/framework/Asserts.h"
34#include "tests/framework/Fixture.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010035#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000036#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/ConvolutionLayer.h"
Michalis Spyroue2503892018-04-23 15:17:31 +010038#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000039#include "tests/validation/reference/Utils.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010040
41#include <random>
42
43namespace arm_compute
44{
45namespace test
46{
47namespace validation
48{
namespace detail
{
/** Configure a convolution function for the validation fixtures.
 *
 * Thin forwarding hook: passes every argument straight to @p func.configure().
 * It exists as a free function so fixtures for functions with a different
 * configure() signature can provide their own overload/specialization
 * without changing the fixture code that calls it.
 *
 * @param func         Convolution function to configure.
 * @param src          Source tensor.
 * @param weights      Weights tensor.
 * @param bias         Bias tensor.
 * @param dst          Destination tensor.
 * @param info         Padding and stride information.
 * @param weights_info Weights reshape information.
 * @param dilation     Kernel dilation.
 * @param act_info     Fused activation information.
 * @param num_groups   Number of convolution groups.
 */
template <typename ConvolutionFunction, typename TensorType>
void configure_conv_function(ConvolutionFunction &func,
                             TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst,
                             const PadStrideInfo &info, const WeightsInfo &weights_info,
                             const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, num_groups);
}
} // namespace detail
60
/** Generic convolution-layer validation fixture.
 *
 * Runs the convolution function under test (@p FunctionType, accessed through
 * @p AccessorType) and a scalar reference implementation on identically-filled
 * tensors, storing both results (_target / _reference) for the test suite to
 * compare.
 *
 * @tparam TensorType   Backend tensor type (e.g. Tensor, CLTensor).
 * @tparam AccessorType Accessor used to fill backend tensors.
 * @tparam FunctionType Convolution function under test.
 * @tparam T            Input/output element type.
 * @tparam TW           Weights element type (differs from T for per-channel quantization).
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
    // Bias element type: quantized (u8/s8) inputs accumulate into S32, everything else uses T.
    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
                  || std::is_same<typename std::decay<T>::type, int8_t>::value,
                  int32_t, T >::type;

public:
    /** Set up the fixture: record the test configuration and compute both the
     *  target (backend) and reference outputs.
     *
     * @param input_shape               Input tensor shape (layout-agnostic, NCHW order; permuted below for NHWC).
     * @param weights_shape             Weights tensor shape.
     * @param bias_shape                Bias tensor shape.
     * @param output_shape              Expected output tensor shape.
     * @param info                      Padding and stride information.
     * @param dilation                  Kernel dilation.
     * @param reshape_weights           True if the function should reshape the weights (inverted into WeightsInfo).
     * @param data_type                 Input/output data type.
     * @param weights_data_type         Weights data type (may differ, e.g. QSYMM8_PER_CHANNEL).
     * @param data_layout               NCHW or NHWC.
     * @param quantization_info         Quantization info for input/bias/output.
     * @param weight_quantization_info  Quantization info for the weights.
     * @param act_info                  Fused activation information.
     * @param mixed_layout              If true, flip the data layout after configure and run (multi-layout graph case).
     */
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false)
    {
        _mixed_layout             = mixed_layout;
        _data_type                = data_type;
        _weights_data_type        = weights_data_type;
        _is_quantized             = is_data_type_quantized_asymmetric(data_type);
        _is_bfloat16              = data_type == DataType::BFLOAT16;
        // Quantized convolutions accumulate the bias in S32; bfloat16 runs keep
        // bias and output in F32 (the reference path is computed in F32 too).
        _bias_data_type           = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
        _output_data_type         = _is_bfloat16 ? DataType::F32 : data_type;
        _quantization_info        = quantization_info;
        _weight_quantization_info = weight_quantization_info;
        _data_layout              = data_layout;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
    }

protected:
    /** Run @p layer with the data layout flipped after configuration, then restore it.
     *
     * Exercises the "data layout changes after configure" multi-layout graph case;
     * the original layout is reinstated so the suite can validate values normally.
     */
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(_data_layout);
        dst.info()->set_data_layout(_data_layout);
    }

    /** Round-trip an F32 buffer through bfloat16.
     *
     * Quantizes each float to the value representable in bfloat16 so the F32
     * reference sees exactly the data the bfloat16 target sees, reducing
     * output mismatches.
     *
     * @param values Pointer to an array of @p size floats (cast from void*).
     * @param size   Number of float elements.
     */
    void regularize_values(void *values, size_t size)
    {
        float *fvalues = static_cast<float *>(values);
        for(size_t i = 0; i < size; ++i)
        {
            fvalues[i] = float(bfloat16(fvalues[i]));
        }
    }

    /** Fill @p tensor with data appropriate for its data type, seeded by @p i.
     *
     * The same (tensor-index, seed) pairs are used for target and reference so
     * both paths consume identical data.
     *
     * NOTE(review): std::uniform_int_distribution with uint8_t/int8_t is
     * undefined behaviour per the C++ standard (IntType must be short or wider);
     * it works on common toolchains but is non-portable — confirm against the
     * project's supported compilers.
     */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Restrict values to the quantized representation of [-1, 1].
                std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QSYMM8_PER_CHANNEL:
            {
                // Take the union of the per-channel [-1, 1] quantized ranges so a
                // single distribution covers every channel's valid values.
                // Initial sentinels lie outside the int8 range in each direction.
                int min_bound = 128;
                int max_bound = -127;
                // NOTE(review): this loop index shadows the seed parameter `i`;
                // the fill below uses the outer `i` (the loop variable is out of
                // scope there) — consider renaming for clarity.
                for(size_t i = 0; i < _weight_quantization_info.scale().size(); i++)
                {
                    std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
                    if(bounds.first < min_bound)
                    {
                        min_bound = bounds.first;
                    }
                    if(bounds.second > max_bound)
                    {
                        max_bound = bounds.second;
                    }
                }
                std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                // Bias tensor for quantized runs.
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::BFLOAT16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    /** Create, configure, fill and run the backend convolution; return the output tensor.
     *
     * Shapes arrive in NCHW order and are permuted in place when the layout is
     * NHWC. Grouped convolution is derived from the channel ratio.
     */
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        // Input channels must be an exact multiple of weights channels (grouping).
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        const int idx_width  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

        // WeightsInfo's first argument is "are weights already reshaped", hence the negation.
        WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
        TensorShape reshaped_weights_shape(weights_shape);

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout);

        // Create and configure function
        FunctionType conv;
        detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors — seeds 0/1/2 match compute_reference() below.
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

    /** Compute the scalar reference result on identically-seeded tensors. */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        // Setup reference data types — bfloat16 runs are referenced in F32.
        const DataType src_dt     = _is_bfloat16 ? DataType::F32 : _data_type;
        const DataType weights_dt = _is_bfloat16 ? DataType::F32 : _weights_data_type;
        const DataType bias_dt    = _is_bfloat16 ? DataType::F32 : _bias_data_type;

        // Create reference
        SimpleTensor<T>     src{ input_shape, src_dt, 1, _quantization_info };
        SimpleTensor<TW>    weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };

        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        // Fill with bfloat16 to perform the conversion and reduce the mismatches in the output
        if(_is_bfloat16)
        {
            regularize_values(static_cast<void *>(src.data()), src.num_elements());
            regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
        }

        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups),
                                                                     act_info) :
               reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
    }

    TensorType       _target{};                   // Backend result.
    SimpleTensor<T>  _reference{};                // Reference result.
    DataType         _data_type{};                // Input data type.
    DataType         _weights_data_type{};        // Weights data type.
    DataType         _bias_data_type{};           // Derived bias data type (see setup()).
    DataType         _output_data_type{};         // Derived output data type (see setup()).
    DataLayout       _data_layout{};              // NCHW or NHWC.
    QuantizationInfo _quantization_info{};        // Input/bias/output quantization.
    QuantizationInfo _weight_quantization_info{}; // Weights quantization.
    bool             _is_quantized = false;       // Asymmetric-quantized input?
    bool             _is_bfloat16  = false;       // BFLOAT16 run (F32 reference)?
    bool             _mixed_layout = false;       // Flip layout after configure?
};
292
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000293template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100294class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100295{
296public:
297 template <typename...>
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000298 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Michalis Spyroue2503892018-04-23 15:17:31 +0100299 DataLayout data_layout, ActivationLayerInfo act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100300 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100301 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
302 data_type, data_type, data_layout,
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000303 QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout);
Chunosov5124be52017-11-22 20:42:13 +0700304 }
305};
306
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000307template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100308class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Chunosov5124be52017-11-22 20:42:13 +0700309{
310public:
311 template <typename...>
Alex Gilday7da29b62018-03-23 14:16:00 +0000312 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Georgios Pinitas19ea4192018-06-19 13:09:53 +0100313 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
Chunosov5124be52017-11-22 20:42:13 +0700314 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100315 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000316 data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100317 }
318};
319
320template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
321class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
322{
323public:
324 template <typename...>
325 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
326 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
327 {
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000328 std::vector<float> weights_scales{};
329 std::mt19937 gen(library->seed());
330 std::uniform_real_distribution<float> dis(0.01f, 1.f);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100331 for(size_t i = 0; i < output_shape[2]; ++i)
332 {
333 weights_scales.push_back(dis(gen));
334 }
335 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
336 reshape_weights, data_type, weights_data_type, data_layout,
337 quantization_info, QuantizationInfo(weights_scales), act_info);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100338 }
339};
340} // namespace validation
341} // namespace test
342} // namespace arm_compute
343#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */