/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
25#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
26
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"

#include <cstddef>
#include <cstdint>
#include <random>
#include <type_traits>
#include <utility>
#include <vector>
42
43namespace arm_compute
44{
Moritz Pflanzerbeabe3b2017-08-31 14:56:32 +010045class NEConvolutionLayer;
46
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010047namespace test
48{
49namespace validation
50{
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010051template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
Chunosov5124be52017-11-22 20:42:13 +070052class ConvolutionValidationGenericFixture : public framework::Fixture
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010053{
54public:
Georgios Pinitas6e1791b2019-12-02 19:01:25 +000055 using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
56 || std::is_same<typename std::decay<T>::type, int8_t>::value,
57 int32_t, T >::type;
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010058
Chunosov5124be52017-11-22 20:42:13 +070059public:
60 template <typename...>
Alex Gilday7da29b62018-03-23 14:16:00 +000061 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010062 DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info)
Chunosov5124be52017-11-22 20:42:13 +070063 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010064 _data_type = data_type;
65 _weights_data_type = weights_data_type;
66 _is_quantized = is_data_type_quantized_asymmetric(data_type);
67 _bias_data_type = _is_quantized ? DataType::S32 : data_type;
68 _quantization_info = quantization_info;
69 _weight_quantization_info = weight_quantization_info;
70 _data_layout = data_layout;
Chunosov5124be52017-11-22 20:42:13 +070071
Isabella Gottardi3f217ec2018-02-12 14:59:19 +000072 _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
73 _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +010074 }
75
76protected:
77 template <typename U>
78 void fill(U &&tensor, int i)
79 {
80 switch(tensor.data_type())
81 {
Chunosov5124be52017-11-22 20:42:13 +070082 case DataType::QASYMM8:
83 {
Michele Di Giorgioed5a4922018-09-13 16:22:01 +010084 std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
85 std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
Chunosov5124be52017-11-22 20:42:13 +070086 library->fill(tensor, distribution, i);
87 break;
88 }
Georgios Pinitas6e1791b2019-12-02 19:01:25 +000089 case DataType::QASYMM8_SIGNED:
90 {
91 std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
92 std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
93 library->fill(tensor, distribution, i);
94 break;
95 }
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +010096 case DataType::QSYMM8_PER_CHANNEL:
97 {
98 int min_bound = 128;
99 int max_bound = -127;
100 for(size_t i = 0; i < _weight_quantization_info.scale().size(); i++)
101 {
102 std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
103 if(bounds.first < min_bound)
104 {
105 min_bound = bounds.first;
106 }
107 if(bounds.second > max_bound)
108 {
109 max_bound = bounds.second;
110 }
111 }
112 std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
113 library->fill(tensor, distribution, i);
114 break;
115 }
Chunosov5124be52017-11-22 20:42:13 +0700116 case DataType::S32:
117 {
118 std::uniform_int_distribution<int32_t> distribution(-100, 100);
119 library->fill(tensor, distribution, i);
120 break;
121 }
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100122 case DataType::F16:
123 case DataType::F32:
124 {
125 std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
126 library->fill(tensor, distribution, i);
127 break;
128 }
129 default:
130 library->fill_tensor_uniform(tensor, i);
131 }
132 }
133
Michalis Spyroue2503892018-04-23 15:17:31 +0100134 TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000135 bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100136 {
Gian Marco Iodice916d1bc2018-08-13 11:20:41 +0100137 ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
138
139 const unsigned int num_groups = input_shape[2] / weights_shape[2];
140
Michalis Spyroue2503892018-04-23 15:17:31 +0100141 if(_data_layout == DataLayout::NHWC)
Moritz Pflanzercde1e8a2017-09-08 09:53:14 +0100142 {
Michalis Spyroue2503892018-04-23 15:17:31 +0100143 permute(input_shape, PermutationVector(2U, 0U, 1U));
144 permute(weights_shape, PermutationVector(2U, 0U, 1U));
145 permute(output_shape, PermutationVector(2U, 0U, 1U));
Moritz Pflanzercde1e8a2017-09-08 09:53:14 +0100146 }
147
Michalis Spyroue2503892018-04-23 15:17:31 +0100148 const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
149 const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
150
151 WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
152 TensorShape reshaped_weights_shape(weights_shape);
153
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100154 // Create tensors
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100155 TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100156 TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100157 TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
158 TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100159
160 // Create and configure function
161 FunctionType conv;
Gian Marco Iodice916d1bc2018-08-13 11:20:41 +0100162 conv.configure(&src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100163
164 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
165 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
166 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
167 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
168
169 // Allocate tensors
170 src.allocator()->allocate();
171 weights.allocator()->allocate();
172 bias.allocator()->allocate();
173 dst.allocator()->allocate();
174
175 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
176 ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
177 ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
178 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
179
180 // Fill tensors
181 fill(AccessorType(src), 0);
Michalis Spyroue2503892018-04-23 15:17:31 +0100182 fill(AccessorType(weights), 1);
183 fill(AccessorType(bias), 2);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100184
185 // Compute NEConvolutionLayer function
186 conv.run();
187
188 return dst;
189 }
190
Alex Gilday7da29b62018-03-23 14:16:00 +0000191 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000192 const Size2D &dilation, const ActivationLayerInfo act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100193 {
Gian Marco Iodice916d1bc2018-08-13 11:20:41 +0100194 ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
195
196 const unsigned int num_groups = input_shape[2] / weights_shape[2];
197
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100198 // Create reference
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100199 SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info };
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100200 SimpleTensor<TW> weights{ weights_shape, _weights_data_type, 1, _weight_quantization_info };
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +0100201 SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100202
203 // Fill reference
204 fill(src, 0);
205 fill(weights, 1);
206 fill(bias, 2);
207
Gian Marco Iodice916d1bc2018-08-13 11:20:41 +0100208 return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups),
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000209 act_info) :
Gian Marco Iodice916d1bc2018-08-13 11:20:41 +0100210 reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100211 }
212
Chunosov5124be52017-11-22 20:42:13 +0700213 TensorType _target{};
214 SimpleTensor<T> _reference{};
215 DataType _data_type{};
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100216 DataType _weights_data_type{};
Chunosov5124be52017-11-22 20:42:13 +0700217 DataType _bias_data_type{};
Michalis Spyroue2503892018-04-23 15:17:31 +0100218 DataLayout _data_layout{};
Chunosov5124be52017-11-22 20:42:13 +0700219 QuantizationInfo _quantization_info{};
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100220 QuantizationInfo _weight_quantization_info{};
Chunosov5124be52017-11-22 20:42:13 +0700221 bool _is_quantized = false;
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100222};
223
224template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100225class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100226{
227public:
228 template <typename...>
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000229 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Michalis Spyroue2503892018-04-23 15:17:31 +0100230 DataLayout data_layout, ActivationLayerInfo act_info)
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100231 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100232 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
233 data_type, data_type, data_layout,
234 QuantizationInfo(), QuantizationInfo(), act_info);
Chunosov5124be52017-11-22 20:42:13 +0700235 }
236};
237
238template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100239class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
Chunosov5124be52017-11-22 20:42:13 +0700240{
241public:
242 template <typename...>
Alex Gilday7da29b62018-03-23 14:16:00 +0000243 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
Georgios Pinitas19ea4192018-06-19 13:09:53 +0100244 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
Chunosov5124be52017-11-22 20:42:13 +0700245 {
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100246 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
247 data_type, data_type, data_layout, quantization_info, quantization_info, act_info);
248 }
249};
250
251template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
252class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
253{
254public:
255 template <typename...>
256 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
257 DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
258 {
259 std::vector<float> weights_scales{};
260 std::mt19937 gen(library->seed());
261 std::uniform_real_distribution<> dis(0.01f, 1);
262 for(size_t i = 0; i < output_shape[2]; ++i)
263 {
264 weights_scales.push_back(dis(gen));
265 }
266 ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
267 reshape_weights, data_type, weights_data_type, data_layout,
268 quantization_info, QuantizationInfo(weights_scales), act_info);
Moritz Pflanzerb3d25792017-07-26 11:49:37 +0100269 }
270};
271} // namespace validation
272} // namespace test
273} // namespace arm_compute
274#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */