/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/GEMMLowp.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
template <typename U>
void fill(U &&tensor, int i)
{
    // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
    std::uniform_int_distribution<> distribution(1, 254);
    library->fill(tensor, distribution, i);
}

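/** Compute the target of a GEMMLowp validation test.
 *
 * Creates QASYMM8 input tensors (plus an optional S32 bias when @p is_fused is true), configures
 * the function under test with the given quantization offsets and output stage, runs it and
 * returns the result: raw S32 accumulators when no output stage is set, QASYMM8 otherwise.
 */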
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false>
TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                   GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo())
{
    // Create tensors
    TensorType a      = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
    TensorType b      = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1);
    TensorType output = create_tensor<TensorType>(shape_output, output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : DataType::QASYMM8, 1);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    TensorType bias;
    if(is_fused)
    {
        TensorShape bias_shape(shape_b[0]);
        bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
    }

    // Create and configure function
    // The GEMMInfo carries the depth of the 3D tensor in case the input/output has to be reinterpreted as 3D
    FunctionType gemmlowp;
    // TODO (COMPMID-1672) - Extend the test to validate the bias addition in the offset contribution
    gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, output_stage));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Allocate tensors
    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();

    ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Fill tensors
    fill(AccessorType(a), 0);
    fill(AccessorType(b), 1);

    if(is_fused)
    {
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        bias.allocator()->allocate();
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        fill(AccessorType(bias), 2);
    }

    // Compute GEMM function
    gemmlowp.run();
    return output;
}

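/** Compute the expected result with the plain C++ implementation from tests/validation/reference.
 *
 * When the input is meant to be reinterpreted as 3D, the second and third dimensions are collapsed
 * back into a single matrix dimension first, mirroring what the function under test does internally.
 */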
template <bool reinterpret_input_as_3d>
SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
{
    TensorShape shape_a_to_use = shape_a;
    if(reinterpret_input_as_3d)
    {
        // Collapse the second and third dimensions if the input is 3D
        shape_a_to_use.collapse(2U, 1U);
    }

    // Create reference
    SimpleTensor<uint8_t> a{ shape_a_to_use, DataType::QASYMM8, 1 };
    SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };

    // Fill reference
    fill(a, 0);
    fill(b, 1);

    return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(a, b, shape_output, a_offset, b_offset);
}
} // namespace

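/** Validates a full GEMMLowp matrix multiply core against the reference implementation.
 *
 * A sketch of typical usage from a test suite; the concrete function, accessor and dataset names
 * below are illustrative and depend on the backend being tested:
 *
 *     using NEGEMMLowpFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
 *     FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
 *     {
 *         validate(Accessor(_target), _reference);
 *     }
 */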
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
    {
        _target    = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset);
        _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

protected:
    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

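/** Validates GEMMLowp with the output stage fused into the function under test.
 *
 * The target runs the whole pipeline in a single configure/run pair, while the reference computes
 * the raw S32 GEMM first and then applies the requested quantize-down stage (with bias) on top.
 */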
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
    {
        ARM_COMPUTE_EXPECT(output_stage.type != GEMMLowpOutputStageType::NONE, framework::LogLevel::ERRORS);
        _target    = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage);
        _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage);
    }

protected:
    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
    {
        return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset,
                                                                                                                                                   output_stage);
    }

    SimpleTensor<qasymm8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                              GEMMLowpOutputStageInfo output_stage)
    {
        SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);

        TensorShape           bias_shape(shape_b[0]);
        SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
        fill(bias, 2);

        switch(output_stage.type)
        {
            case GEMMLowpOutputStageType::QUANTIZE_DOWN:
                return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(output, bias,
                                                                                       output_stage.gemmlowp_offset, output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
            case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
                return reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(output, bias,
                                                                                                     output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
            default:
                ARM_COMPUTE_ERROR("Not Supported!");
        }
    }

    TensorType              _target{};
    SimpleTensor<qasymm8_t> _reference{};
};

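/** Validates the integer quantize-down output stage (S32 -> QASYMM8).
 *
 * A rough per-element sketch of the computation being validated; the implementation in
 * tests/validation/reference/GEMMLowp.cpp is authoritative for the exact rounding and clamping:
 *
 *     acc    = input + result_offset + (add_bias ? bias : 0)
 *     acc    = (acc * result_mult_int) >> result_shift
 *     output = saturate_to_uint8(clamp(acc, min, max))
 */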
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_offset, result_mult_int, result_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Run the output stage
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(a, b, result_offset, result_mult_int, result_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_int32_to_uint8_scale<int32_t>(a, result_offset, result_mult_int, result_shift, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Run the output stage
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(a, b, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint<int32_t>(a, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};

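/** Validates the fixed-point quantize-down output stage with symmetric 16-bit output (S32 -> QSYMM16).
 *
 * Same fixed-point arithmetic as the QASYMM8 variant above, but the output type is symmetric,
 * so there is no offset parameter and results saturate to the int16_t range.
 */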
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QSYMM16, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Run the output stage
        output_stage.run();
        return c;
    }

    SimpleTensor<int16_t> compute_reference(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint<int32_t>(a, b, result_fixedpoint_multiplier, result_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint<int32_t>(a, result_fixedpoint_multiplier, result_shift, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<int16_t> _reference{};
};

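/** Validates GEMMLowp with both LHS and RHS matrices reshaped before the multiplication.
 *
 * m0/n0/k0 are the block sizes processed by each work item, while v0 (LHS) and h0 (RHS) control
 * how many blocks are laid out on the same row of the reshaped matrices; interleave and transpose
 * select the reshaped layout. The reference always computes on the original, non-reshaped data.
 */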
template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
               bool interleave_rhs)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = m0;
        lhs_info.k0         = k0;
        lhs_info.v0         = v0;
        lhs_info.interleave = interleave_lhs;
        lhs_info.transpose  = false;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = true;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
        _reference = compute_reference(lhs_shape, rhs_shape);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType lhs_reshaped;
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeLHSFunctionType reshape_lhs;
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_lhs.run();
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

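/** 3D variant of the reshaped fixture: the output is reinterpreted as a 3D tensor of height m_h,
 * so M = m_w * m_h and the reference output shape is un-flattened accordingly.
 */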
template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
               bool interleave_lhs, bool interleave_rhs)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = m0;
        lhs_info.k0         = k0;
        lhs_info.v0         = v0;
        lhs_info.interleave = interleave_lhs;
        lhs_info.transpose  = false;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = true;

        // In case of GEMM3D, m is the product between m_w and m_h
        const unsigned int m = m_w * m_h;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
        _reference = compute_reference(lhs_shape, rhs_shape, m_h);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType lhs_reshaped;
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeLHSFunctionType reshape_lhs;
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_lhs.run();
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape.set(0, rhs_shape[0]);
        dst_shape.set(1, lhs_shape[1] / m_h);
        dst_shape.set(2, m_h);
        dst_shape.set(3, lhs_shape[2]);

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

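/** Validates GEMMLowp when only the RHS matrix is reshaped; the LHS is read in its original
 * layout, a configuration typically chosen when the LHS changes from run to run.
 */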
template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = transpose_rhs;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
        _reference = compute_reference(lhs_shape, rhs_shape);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

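/** 3D variant of the RHS-only reshaped fixture (output reinterpreted as 3D, M = m_w * m_h). */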
template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0,
               bool interleave_rhs, bool transpose_rhs)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = transpose_rhs;

        // In case of GEMM3D, m is the product between m_w and m_h
        const unsigned int m = m_w * m_h;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
        _reference = compute_reference(lhs_shape, rhs_shape, m_h);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape.set(0, rhs_shape[0]);
        dst_shape.set(1, lhs_shape[1] / m_h);
        dst_shape.set(2, m_h);
        dst_shape.set(3, lhs_shape[2]);

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

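/** Validates the native (non-reshaped) GEMMLowp kernel: both matrices are consumed in their
 * original layout and only the m0/n0/k0 block sizes are configurable.
 */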
template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyNativeValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0 = n0;
        rhs_info.k0 = k0;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
        _reference = compute_reference(lhs_shape, rhs_shape);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        GEMMFunctionType gemm;
        gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

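/** 3D variant of the native fixture (output reinterpreted as 3D, M = m_w * m_h). */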
template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyNative3DValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0 = n0;
        rhs_info.k0 = k0;

        // In case of GEMM3D, m is the product between m_w and m_h
        const unsigned int m = m_w * m_h;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
        _reference = compute_reference(lhs_shape, rhs_shape, m_h);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        GEMMFunctionType gemm;
        gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape.set(0, rhs_shape[0]);
        dst_shape.set(1, lhs_shape[1] / m_h);
        dst_shape.set(2, m_h);
        dst_shape.set(3, lhs_shape[2]);

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */