/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/RawTensor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/FullyConnectedLayer.h"
#include "tests/validation/reference/Utils.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
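/** Generic fixture: runs the fully connected layer under test and the portable
 * reference implementation on identically seeded tensors, so that test cases can
 * compare _target against _reference. */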
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class FullyConnectedLayerValidationGenericFixture : public framework::Fixture
{
public:
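    // Asymmetric quantized runs accumulate into 32-bit integer biases; all other data types keep T.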
    using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;

public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
               DataType data_type, QuantizationInfo quantization_info)
    {
        ARM_COMPUTE_UNUSED(weights_shape);
        ARM_COMPUTE_UNUSED(bias_shape);

        _data_type         = data_type;
        _bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
        _quantization_info = quantization_info;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
    }

protected:
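    // Fill a tensor with pseudo-random values suited to the configured data type; the
    // index i gives each tensor (src = 0, weights = 1, bias = 2) its own reproducible seed.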
    template <typename U>
    void fill(U &&tensor, int i)
    {
        if(is_data_type_quantized_asymmetric(_data_type))
        {
            std::uniform_int_distribution<uint8_t> distribution(0, 30);
            library->fill(tensor, distribution, i);
        }
        else if(_data_type == DataType::S32)
        {
            std::uniform_int_distribution<int32_t> distribution(-50, 50);
            library->fill(tensor, distribution, i);
        }
        else if(is_data_type_float(_data_type))
        {
            std::uniform_real_distribution<> distribution(0.5f, 1.f);
            library->fill(tensor, distribution, i);
        }
        else
        {
            library->fill_tensor_uniform(tensor, i);
        }
    }

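    // Create, configure, allocate, fill and run the function under test, returning its output tensor.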
    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, bool transpose_weights,
                              bool reshape_weights)
    {
        TensorShape reshaped_weights_shape(weights_shape);

        // Test actions depending on the target settings
        //
        //            | reshape   | !reshape
        // -----------+-----------+---------------------------
        //  transpose |           | ***
        // -----------+-----------+---------------------------
        // !transpose | transpose | transpose
        //            |           |
        //
        // ***: That combination is invalid. But we can ignore the transpose flag and handle all !reshape the same
        if(!reshape_weights || !transpose_weights)
        {
            const size_t shape_x = reshaped_weights_shape.x();
            reshaped_weights_shape.set(0, reshaped_weights_shape.y());
            reshaped_weights_shape.set(1, shape_x);
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);

        // Create Fully Connected layer info
        FullyConnectedLayerInfo fc_info;
        fc_info.transpose_weights    = transpose_weights;
        fc_info.are_weights_reshaped = !reshape_weights;

        // Create and configure function.
        FunctionType fc;
        fc.configure(&src, &weights, &bias, &dst, fc_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(bias), 2);

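        // The weights tensor was created with swapped dimensions above, so fill a tensor of the
        // original shape with the same seed as the reference and copy its transpose into the target.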
        if(!reshape_weights || !transpose_weights)
        {
            TensorShape tmp_shape(weights_shape);
            RawTensor   tmp(tmp_shape, _data_type, 1);

            // Fill with original shape
            fill(tmp, 1);

            // Transpose elementwise
            tmp = transpose(tmp);

            AccessorType weights_accessor(weights);

            for(int i = 0; i < tmp.num_elements(); ++i)
            {
                Coordinates coord = index2coord(tmp.shape(), i);
                std::copy_n(static_cast<const RawTensor::value_type *>(tmp(coord)),
                            tmp.element_size(),
                            static_cast<RawTensor::value_type *>(weights_accessor(coord)));
            }
        }
        else
        {
            fill(AccessorType(weights), 1);
        }

        // Run the function under test
        fc.run();

        return dst;
    }

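    // Compute the expected output with the naive reference implementation, using the same fill seeds as the target.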
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, bool transpose_weights,
                                      bool reshape_weights)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        return reference::fully_connected_layer<T>(src, weights, bias, output_shape);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    DataType         _data_type{};
    DataType         _bias_data_type{};
    QuantizationInfo _quantization_info{};
};

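// The concrete fixtures below only forward to the generic setup(), fixing the
// QuantizationInfo: empty for the plain and fixed-point variants, caller-supplied
// for the quantized variant.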
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type)
    {
        FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
                                                                                                       reshape_weights, data_type,
                                                                                                       QuantizationInfo());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class FullyConnectedLayerValidationFixedPointFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type)
    {
        FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
                                                                                                       reshape_weights, data_type,
                                                                                                       QuantizationInfo());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class FullyConnectedLayerValidationQuantizedFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type,
               QuantizationInfo quantization_info)
    {
        FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
                                                                                                       reshape_weights, data_type,
                                                                                                       quantization_info);
    }
};
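
// Typical use (illustrative sketch, not part of this header): a test suite aliases a
// fixture for a concrete backend and drives it through the test framework, e.g.
//
//   template <typename T>
//   using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::PRECOMMIT, /* dataset */)
//   {
//       validate(Accessor(_target), _reference, tolerance_f32);
//   }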
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE */