/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Permute.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

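/** Generic fixture for direct convolution validation.
 *
 * Configures and runs the convolution function under test on the target backend and computes the
 * expected output with the scalar reference implementation, so the test suite can compare the two.
 */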
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationGenericFixture : public framework::Fixture
{
public:
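    // Quantized data types accumulate into 32-bit biases; floating-point types keep the bias in T.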
    using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;

    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
    {
        _quantization_info = quantization_info;
        _data_type         = data_type;
        _mixed_layout      = mixed_layout;

        TensorShape         weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
        const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        TensorInfo input_info   = TensorInfo(input_shape, 1, data_type);
        TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);

        const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
    }

    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
        ARM_COMPUTE_UNUSED(dilation);

        _quantization_info = quantization_info;
        _data_type         = data_type;

        const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
    }

protected:

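    // Run the layer after flipping the data layout reported by the tensor infos, emulating graphs
    // where the layout changes between configure() and run(), then restore it for validation.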
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        DataLayout data_layout = src.info()->data_layout();
        // Test Multi DataLayout graph cases, when the data layout changes after configure
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        // Compute Convolution function
        layer.run();

        // Reinstating original data layout for the test suite to properly check the values
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

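    // Fill a tensor with pseudo-random values drawn from a type-appropriate distribution; narrow
    // ranges are used for the quantized and integer types to keep results from saturating.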
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<uint8_t> distribution(0, 50);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Use small input range to avoid all the test results being saturated at the end.
                std::uniform_int_distribution<int8_t> distribution(-25, 25);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-5, 5);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

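    // Create, configure, allocate and run the convolution function under test and return its output
    // tensor. For NHWC the shapes are permuted up front so the created tensors match the data layout.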
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
    {
        if(data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info, act_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        if(_mixed_layout)
        {
            mix_layout(conv, src, dst);
        }
        else
        {
            // Compute Convolution function
            conv.run();
        }

        return dst;
    }

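    // Compute the expected output with the scalar reference implementation, applying the activation
    // layer afterwards when one is requested.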
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
        return (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst;
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    QuantizationInfo _quantization_info{};
    DataType         _data_type{};
    bool             _mixed_layout{ false };
};

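// Fixture for non-quantized runs: forwards to the generic fixture with an empty QuantizationInfo.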
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info,
               DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
                                                                                                     act_info, data_layout, mixed_layout);
    }
};

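// Fixture for quantized runs: identical to the above but takes a caller-supplied QuantizationInfo.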
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
               ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
                                                                                                     act_info, data_layout, mixed_layout);
    }
};

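// Quantized fixture that takes explicit input/weights/bias/output shapes instead of deriving them
// from the kernel size and number of kernels.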
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, quantization_info,
                                                                                                     act_info, data_layout);
    }
};

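// Non-quantized fixture with explicit tensor shapes; always runs with the NCHW data layout.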
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               DataType data_type, ActivationLayerInfo act_info)
    {
        DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(),
                                                                                                     act_info, DataLayout::NCHW);
    }
};

} // namespace validation
} // namespace test
} // namespace arm_compute