/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/Utils.h"

#include <random>

namespace arm_compute
{
class NEConvolutionLayer;

namespace test
{
namespace validation
{
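/** Generic validation fixture for convolution layer functions.
 *
 * Computes the target output with the function under test (@p FunctionType) and the expected output with
 * reference::convolution_layer, optionally followed by a fused activation layer. Supports floating point,
 * fixed point and asymmetric quantized data types as well as NCHW/NHWC data layouts.
 */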
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
    using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;

public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
               DataType data_type, DataLayout data_layout, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
    {
        _data_type         = data_type;
        _is_quantized      = is_data_type_quantized_asymmetric(data_type);
        _bias_data_type    = _is_quantized ? DataType::S32 : data_type;
        _fractional_bits   = fractional_bits;
        _quantization_info = quantization_info;
        _data_layout       = data_layout;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::uniform_int_distribution<uint8_t> distribution(0, 3);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

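    /** Run the function under test: permute the shapes for NHWC, create and configure the tensors, allocate and fill them, then execute the configured function and return the destination tensor. */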
    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
                              bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        const int idx_width  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

        WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
        TensorShape reshaped_weights_shape(weights_shape);

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info, weights_info, dilation, act_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        fill(AccessorType(bias), 2);

        // Compute NEConvolutionLayer function
        conv.run();

        return dst;
    }

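    /** Compute the expected output with the reference convolution (and activation, if enabled) on SimpleTensor inputs filled with the same seeds as the target. */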
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      const Size2D &dilation, const ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation), act_info) :
                                      reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    DataType         _data_type{};
    DataType         _bias_data_type{};
    DataLayout       _data_layout{};
    int              _fractional_bits{};
    QuantizationInfo _quantization_info{};
    bool             _is_quantized = false;
};

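/** Convolution layer validation fixture for floating point data types: runs the generic fixture with no fixed point position and no quantization info. */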
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               DataLayout data_layout, ActivationLayerInfo act_info)
    {
        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type,
                                                                                              data_layout, 0, QuantizationInfo(), act_info);
    }
};

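/** Convolution layer validation fixture for fixed point data types: forwards the fixed point position and forces the NCHW data layout. */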
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ConvolutionValidationFixedPointFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               int fractional_bits, ActivationLayerInfo act_info)
    {
        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type,
                                                                                              DataLayout::NCHW, fractional_bits, QuantizationInfo(), act_info);
    }
};

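/** Convolution layer validation fixture for asymmetric quantized data types: forwards the quantization info and forces the NCHW data layout. */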
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               QuantizationInfo quantization_info, ActivationLayerInfo act_info)
    {
        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type,
                                                                                              DataLayout::NCHW, 0, quantization_info, act_info);
    }
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */