/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Permute.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

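// Generic fixture: configures and runs a direct convolution through FunctionType (the backend under test)
// and computes the expected result with the plain C++ reference implementation for comparison.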
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationGenericFixture : public framework::Fixture
{
public:
    using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;

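    // Setup overload that derives the weights, bias and output shapes from the kernel size, number of
    // kernels and stride/padding, using compute_deep_convolution_shape() for the output shape.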
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
    {
        _quantization_info = quantization_info;
        _data_type         = data_type;
        _mixed_layout      = mixed_layout;

        TensorShape         weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
        const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        TensorInfo input_info   = TensorInfo(input_shape, 1, data_type);
        TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);

        const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
    }

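    // Setup overload that takes explicit weights/bias/output shapes; dilation is accepted for interface
    // compatibility but is unused by the direct convolution.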
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
        ARM_COMPUTE_UNUSED(dilation);

        _quantization_info = quantization_info;
        _data_type         = data_type;

        const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
    }

protected:
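    // Run the already-configured function with the opposite data layout set on the tensors, then restore
    // the original layout so the validation accessors read the values correctly.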
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        DataLayout data_layout = src.info()->data_layout();
        // Test multi data layout graph cases, where the data layout changes after configure()
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Restore the original data layout so the test suite can properly check the values
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

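    // Fill tensor i with a deterministic distribution chosen per data type; quantized and integer types
    // use narrow ranges to keep the results away from saturation.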
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<uint8_t> distribution(0, 50);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Use a small input range to avoid the test results all saturating at the end.
                std::uniform_int_distribution<int8_t> distribution(-25, 25);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-5, 5);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

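    // Build and run the target pipeline: create tensors in the requested data layout, configure the
    // function, allocate and fill the tensors, then run it (optionally with a mixed-layout run).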
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info, act_info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        add_padding_x({ &src, &weights, &bias, &dst }, data_layout);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

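    // Compute the expected output with the reference implementation, using the same fill seeds as the target.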
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
        return (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst;
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    QuantizationInfo _quantization_info{};
    DataType         _data_type{};
    bool             _mixed_layout{ false };
};

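// Non-quantized validation fixture: forwards to the generic fixture with an empty QuantizationInfo.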
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info,
               DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
                                                                                                     act_info, data_layout, mixed_layout);
    }
};

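// Quantized validation fixture: same as above but with a caller-provided QuantizationInfo.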
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
               ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
                                                                                                     act_info, data_layout, mixed_layout);
    }
};

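// Quantized fixture taking explicit tensor shapes instead of kernel size/stride parameters.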
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, quantization_info,
                                                                                                     act_info, data_layout);
    }
};

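// Non-quantized fixture taking explicit tensor shapes; always runs in NCHW layout.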
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(),
                                                                                                     act_info, DataLayout::NCHW);
    }
};

} // namespace validation
} // namespace test
} // namespace arm_compute