/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/GEMMLowp.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
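/** Fixture for validating a GEMMLowp matrix multiply core function.
 *
 * Runs the function under test on QASYMM8 inputs (with the given quantization
 * offsets) producing an S32 output, and compares it against the reference
 * implementation. The input and/or output can be reinterpreted as 3D through
 * the template flags.
 */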
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
    {
        _target    = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset);
        _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
    {
        // Create tensors
        TensorType a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
        TensorType b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1);
        TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);

        a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
        b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

        // Create and configure function
        // The GEMMInfo carries the depth value used when the input/output is reinterpreted as 3D
        FunctionType gemmlowp;
        gemmlowp.configure(&a, &b, &c, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_c[2] : 1), reinterpret_input_as_3d));

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        b.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(a), 0);
        fill(AccessorType(b), 1);

        // Compute GEMM function
        gemmlowp.run();
        return c;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
    {
        TensorShape shape_a_to_use = shape_a;
        if(reinterpret_input_as_3d)
        {
            // Collapse the second and third dimensions if the input is 3D
            shape_a_to_use.collapse(2U, 1U);
        }

        // Create reference
        SimpleTensor<uint8_t> a{ shape_a_to_use, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };

        // Fill reference
        fill(a, 0);
        fill(b, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(a, b, shape_c, a_offset, b_offset);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
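
// A sketch of how this fixture is typically instantiated from a backend test
// suite (illustrative; the exact typedefs and datasets live in the suite
// files, e.g. tests/validation/NEON/GEMMLowp.cpp, and may differ):
//
//   using NEGEMMLowpMatrixMultiplyCoreFixture =
//       GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
//   {
//       validate(Accessor(_target), _reference);
//   }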
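
/** Fixture for validating a GEMMLowp quantize-down output stage that converts
 * S32 accumulators to QASYMM8 using an integer multiplier and shift, with an
 * optional per-column bias.
 *
 * Per element, the stage under test computes (restated here from the
 * quantize-down kernel documentation):
 *   result = ((input [+ bias] + result_offset) * result_mult_int) >> result_shift
 * clamped to [min, max] when a clamping range is set, then saturated to uint8.
 */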
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_offset, result_mult_int, result_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill input tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill bias tensor
            fill(AccessorType(b), 1);
        }

        // Compute output stage function
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(a, b, result_offset, result_mult_int, result_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(a, result_offset, result_mult_int, result_shift, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};
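
/** Fixture for validating a GEMMLowp quantize-down output stage that converts
 * S32 accumulators to QASYMM8 using a fixed-point multiplier, with an optional
 * per-column bias.
 *
 * Per element, the stage under test computes (restated from the quantize-down
 * kernel documentation, where FixedPointMul is a saturating rounding doubling
 * high multiply):
 *   result = (FixedPointMul(input [+ bias], result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
 * clamped to [min, max] when a clamping range is set, then saturated to uint8.
 */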
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill input tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill bias tensor
            fill(AccessorType(b), 1);
        }

        // Compute output stage function
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(a, b, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(a, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};
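
// The output-stage fixtures are instantiated analogously; e.g. (illustrative,
// assuming the NEON function names used elsewhere in the library):
//   using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture =
//       GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8Scale>;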
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */