/*
 * Copyright (c) 2017-2018 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/reference/ConvolutionLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
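/** Generic "tensor shift" validation fixture: the direct convolution function is
 *  configured twice, with the output of the first run feeding the second, and the
 *  chained result is compared against the reference convolution applied twice.
 */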
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationGenericTensorShiftFixture : public framework::Fixture
{
public:
    using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;

public:
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
               DataType data_type, QuantizationInfo quantization_info)
    {
        _quantization_info = quantization_info;
        _data_type         = data_type;

        const TensorShape   weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
        const TensorShape   output_shape   = get_output_shape(input_shape, weights_shape, info);
        const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
    }

    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
               DataType data_type, QuantizationInfo quantization_info)
    {
        ARM_COMPUTE_UNUSED(dilation_x, dilation_y);

        _quantization_info = quantization_info;
        _data_type         = data_type;

        const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<uint8_t> distribution(0, 50);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-5, 5);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                              DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
    {
        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info);
        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info);

        // The output of the first convolution is used as the input of the second one
        TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info);
        TensorType  dst1          = create_tensor<TensorType>(output_shape1, data_type, 1, quantization_info);

        // Create and configure functions
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info);
        FunctionType conv1;
        conv1.configure(&dst, &weights, &bias, &dst1, info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst1.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        dst1.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst1.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        // Compute the two chained convolution functions
        GCScheduler::get().memory_barrier();
        conv.run();
        GCScheduler::get().memory_barrier();
        conv1.run();

        return dst1;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };

        SimpleTensor<T> dst{ output_shape, data_type, 1, quantization_info };
        TensorShape     output_shape1 = get_output_shape(output_shape, weights_shape, info);

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
        return reference::convolution_layer<T>(dst, weights, bias, output_shape1, info);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    QuantizationInfo _quantization_info{};
    DataType         _data_type{};

private:
    TensorShape get_output_shape(TensorShape in_shape, TensorShape kernel_shape, const PadStrideInfo &info)
    {
        TensorShape out_shape(in_shape);
        const std::pair<unsigned int, unsigned int> scaled_dims = scaled_dimensions(in_shape.x(),
                                                                                    in_shape.y(),
                                                                                    kernel_shape.x(),
                                                                                    kernel_shape.y(),
                                                                                    info);
        out_shape.set(0, scaled_dims.first);
        out_shape.set(1, scaled_dims.second);
        out_shape.set(2, kernel_shape[3]);
        return out_shape;
    }
};

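/** Convenience fixtures: each one forwards to the generic fixture above, selecting the
 *  appropriate setup() overload and defaulting the QuantizationInfo where it is not needed.
 */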
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type)
    {
        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
                                                                                                               QuantizationInfo());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationQuantizedTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
    {
        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
                                                                                                               quantization_info);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesQuantizedTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
               DataType data_type, QuantizationInfo quantization_info)
    {
        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
                                                                                                               quantization_info);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
               DataType data_type)
    {
        DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
                                                                                                               QuantizationInfo());
    }
};

} // namespace validation
} // namespace test
} // namespace arm_compute