blob: b4abebe18dd2ada2e8a8fdfe68229c4c943a6c8a [file] [log] [blame]
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01001/*
Georgios Pinitasc7b183a2020-03-06 18:12:09 +00002 * Copyright (c) 2017-2020 ARM Limited.
Moritz Pflanzerb3d25792017-07-26 11:49:37 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
26
27#include "arm_compute/core/TensorShape.h"
28#include "arm_compute/core/Types.h"
Moritz Pflanzerbeabe3b2017-08-31 14:56:32 +010029#include "arm_compute/runtime/NEON/NEScheduler.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010030#include "tests/AssetsLibrary.h"
31#include "tests/Globals.h"
32#include "tests/IAccessor.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010033#include "tests/framework/Asserts.h"
34#include "tests/framework/Fixture.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010035#include "tests/validation/Helpers.h"
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000036#include "tests/validation/reference/ActivationLayer.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/ConvolutionLayer.h"
Michalis Spyroue2503892018-04-23 15:17:31 +010038#include "tests/validation/reference/Permute.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000039#include "tests/validation/reference/Utils.h"
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010040
41#include <random>
42
43namespace arm_compute
44{
Moritz Pflanzerbeabe3b2017-08-31 14:56:32 +010045class NEConvolutionLayer;
46
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010047namespace test
48{
49namespace validation
50{
/** Generic validation fixture for convolution layer functions.
 *
 * Runs the backend convolution function under test and a naive reference
 * implementation on identically-filled tensors, storing both results in
 * @p _target and @p _reference for the test case to compare.
 *
 * @tparam TensorType   Backend tensor type (e.g. Tensor / CLTensor).
 * @tparam AccessorType Host-side accessor used to fill the backend tensors.
 * @tparam FunctionType Convolution layer function under test.
 * @tparam T            Element data type of input/output tensors.
 * @tparam TW           Element data type of the weights (differs from T for
 *                      per-channel quantized weights).
 */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
    // Bias element type: quantized 8-bit inputs (uint8_t / int8_t) accumulate
    // into 32-bit integer biases; all other types use the input type itself.
    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
                  || std::is_same<typename std::decay<T>::type, int8_t>::value,
                  int32_t, T >::type;

public:
    /** Configure the fixture and run both target and reference computations.
     *
     * @param input_shape              Shape of the input tensor (NCHW order; permuted internally for NHWC).
     * @param weights_shape            Shape of the weights tensor.
     * @param bias_shape               Shape of the bias tensor.
     * @param output_shape             Shape of the output tensor.
     * @param info                     Padding and stride information.
     * @param dilation                 Kernel dilation.
     * @param reshape_weights          Whether the function should reshape the weights itself.
     * @param data_type                Data type of input/output.
     * @param weights_data_type        Data type of the weights.
     * @param data_layout              NCHW or NHWC.
     * @param quantization_info        Quantization info for input/bias/output.
     * @param weight_quantization_info Quantization info for the weights.
     * @param act_info                 Optional fused activation.
     */
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info)
    {
        _data_type                = data_type;
        _weights_data_type        = weights_data_type;
        _is_quantized             = is_data_type_quantized_asymmetric(data_type);
        _is_bfloat16              = data_type == DataType::BFLOAT16;
        // Quantized runs use S32 biases; BFLOAT16 runs use F32 bias/output so
        // the reference can be computed in single precision.
        _bias_data_type           = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
        _output_data_type         = _is_bfloat16 ? DataType::F32 : data_type;
        _quantization_info        = quantization_info;
        _weight_quantization_info = weight_quantization_info;
        _data_layout              = data_layout;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
    }

protected:
    // Round-trip a buffer of floats through bfloat16 so the reference input
    // carries the same precision loss the target path introduces.
    void regularize_values(void *values, size_t size)
    {
        float *fvalues = static_cast<float *>(values);
        for(size_t i = 0; i < size; ++i)
        {
            fvalues[i] = float(bfloat16(fvalues[i]));
        }
    }

    /** Fill @p tensor with values appropriate for its data type.
     *
     * @param tensor Tensor (target accessor or reference SimpleTensor) to fill.
     * @param i      Seed offset so src/weights/bias get distinct random streams.
     */
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Restrict values to the quantized representation of [-1, 1].
                std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QSYMM8_PER_CHANNEL:
            {
                // Widest bounds across all per-channel scales. Start values lie
                // outside the representable int8 range so the first channel's
                // bounds always replace them.
                int min_bound = 128;
                int max_bound = -127;
                // NOTE(review): this loop index shadows the seed parameter 'i';
                // the fill call below the loop uses the outer 'i' again.
                for(size_t i = 0; i < _weight_quantization_info.scale().size(); i++)
                {
                    std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
                    if(bounds.first < min_bound)
                    {
                        min_bound = bounds.first;
                    }
                    if(bounds.second > max_bound)
                    {
                        max_bound = bounds.second;
                    }
                }
                std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                // Quantized biases: small integer range.
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::BFLOAT16:
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    /** Run the backend convolution function and return its output tensor. */
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        // Grouped convolution: input channels must split evenly over the
        // weights' channel dimension.
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        // Shapes arrive in NCHW order; rotate dimensions for NHWC runs.
        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        const int idx_width  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

        // First argument is !reshape_weights — presumably flags that the weights
        // are already reshaped when the function should not do it; confirm
        // against the WeightsInfo API documentation.
        WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
        TensorShape reshaped_weights_shape(weights_shape);

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors (distinct seed offsets per tensor)
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        // Compute NEConvolutionLayer function
        conv.run();

        return dst;
    }

    /** Compute the reference output with the naive implementation. */
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);

        const unsigned int num_groups = input_shape[2] / weights_shape[2];

        // Setup reference data types: BFLOAT16 is evaluated in F32 precision.
        const DataType src_dt     = _is_bfloat16 ? DataType::F32 : _data_type;
        const DataType weights_dt = _is_bfloat16 ? DataType::F32 : _weights_data_type;
        const DataType bias_dt    = _is_bfloat16 ? DataType::F32 : _bias_data_type;

        // Create reference
        SimpleTensor<T>     src{ input_shape, src_dt, 1, _quantization_info };
        SimpleTensor<TW>    weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };

        // Same seed offsets as compute_target, so both paths see identical data.
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        // Fill with bfloat16 to perform the conversion and reduce the mismatches in the output
        if(_is_bfloat16)
        {
            regularize_values(static_cast<void *>(src.data()), src.num_elements());
            regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
        }

        // Apply the fused activation on top of the reference convolution when enabled.
        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups),
                                                                     act_info) :
               reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
    }

    TensorType       _target{};                   // output of the function under test
    SimpleTensor<T>  _reference{};                // output of the reference implementation
    DataType         _data_type{};                // input/output data type
    DataType         _weights_data_type{};        // weights data type
    DataType         _bias_data_type{};           // derived in setup() (S32 for quantized, F32 for bfloat16)
    DataType         _output_data_type{};         // derived in setup() (F32 for bfloat16)
    DataLayout       _data_layout{};              // NCHW or NHWC
    QuantizationInfo _quantization_info{};        // input/bias/output quantization
    QuantizationInfo _weight_quantization_info{}; // weights quantization (per-tensor or per-channel)
    bool             _is_quantized = false;       // true for QASYMM8 / QASYMM8_SIGNED inputs
    bool             _is_bfloat16  = false;       // true when data_type == BFLOAT16
};
248
249template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100250class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100251{
252public:
253 template <typename...>
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000254 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Michalis Spyroue2503892018-04-23 15:17:31 +0100255 DataLayout data_layout, ActivationLayerInfo act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100256 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100257 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
258 data_type, data_type, data_layout,
259 QuantizationInfo(), QuantizationInfo(), act_info);
Chunosov5124be52017-11-22 20:42:13 +0700260 }
261};
262
263template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100264class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Chunosov5124be52017-11-22 20:42:13 +0700265{
266public:
267 template <typename...>
Alex Gilday7da29b62018-03-23 14:16:00 +0000268 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Georgios Pinitas19ea4192018-06-19 13:09:53 +0100269 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
Chunosov5124be52017-11-22 20:42:13 +0700270 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100271 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
272 data_type, data_type, data_layout, quantization_info, quantization_info, act_info);
273 }
274};
275
276template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
277class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
278{
279public:
280 template <typename...>
281 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
282 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
283 {
284 std::vector<float> weights_scales{};
285 std::mt19937 gen(library->seed());
286 std::uniform_real_distribution<> dis(0.01f, 1);
287 for(size_t i = 0; i < output_shape[2]; ++i)
288 {
289 weights_scales.push_back(dis(gen));
290 }
291 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
292 reshape_weights, data_type, weights_data_type, data_layout,
293 quantization_info, QuantizationInfo(weights_scales), act_info);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100294 }
295};
296} // namespace validation
297} // namespace test
298} // namespace arm_compute
299#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */