/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE

#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/GEMMLowp.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
template <typename U>
void fill(U &&tensor, int i)
{
    switch(tensor.data_type())
    {
        case DataType::QSYMM8_PER_CHANNEL:
        {
            int min_bound = 128;
            int max_bound = -127;
            for(size_t j = 0; j < tensor.quantization_info().scale().size(); j++)
            {
                std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
                if(bounds.first < min_bound)
                {
                    min_bound = bounds.first;
                }
                if(bounds.second > max_bound)
                {
                    max_bound = bounds.second;
                }
            }
            std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
            library->fill(tensor, distribution, i);
            break;
        }
        case DataType::QASYMM8:
        {
            // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
            std::uniform_int_distribution<uint8_t> distribution(1, 254);
            library->fill(tensor, distribution, i);
            break;
        }
        case DataType::F16:
        case DataType::F32:
        {
            std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
            library->fill(tensor, distribution, i);
            break;
        }
        default:
            library->fill_tensor_uniform(tensor, i);
    }
}
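
// Note on the helper above: the integer argument is the seed offset handed to the assets
// library, so calling e.g. fill(AccessorType(a), 0) and fill(AccessorType(b), 1), as the
// fixtures below do, gives each tensor independent but reproducible random content.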

template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false>
TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                   GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
                                   QuantizationInfo b_qinfo = QuantizationInfo())
{
    // Create tensors
    DataType data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;

    TensorType a      = create_tensor<TensorType>(shape_a, data_type_a, 1);
    TensorType b      = create_tensor<TensorType>(shape_b, data_type_b, 1); // Note: the GEMM output mismatches before the output stage if data_type_output is passed here; to be investigated
    TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));

    if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
    {
        b.info()->set_quantization_info(b_qinfo);
    }
    else
    {
        b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
    }
    TensorType bias;
    if(is_fused)
    {
        TensorShape bias_shape(shape_b[0]);
        bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
    }

    // Create and configure function
    // The GEMMInfo includes the depth values in case of reinterpreted 3D input/output
    FunctionType gemmlowp;
    // TODO (COMPMID-1672) - Extend the test to validate the bias addition in the offset contribution
    gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, output_stage));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Allocate tensors
    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();

    ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Fill tensors
    fill(AccessorType(a), 0);
    fill(AccessorType(b), 1);

    if(is_fused)
    {
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        bias.allocator()->allocate();
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        fill(AccessorType(bias), 2);
    }
    // Compute GEMM function
    gemmlowp.run();
    return output;
}
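
// A minimal usage sketch for the helper above. The shapes and offsets are illustrative only
// (real values come from the test datasets); note the K x M / N x K shape convention used
// throughout this file:
//
//   const TensorShape shape_a(21U, 13U);      // K x M
//   const TensorShape shape_b(33U, 21U);      // N x K
//   const TensorShape shape_output(33U, 13U); // N x M
//   TensorType dst = compute_gemmlowp_target<TensorType, AccessorType, FunctionType,
//                                            false /* input as 3D */, false /* output as 3D */, int32_t>(
//       shape_a, shape_b, shape_output, -9 /* a_offset */, 1 /* b_offset */);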

template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t>
SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                                 DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, QuantizationInfo b_qinfo = QuantizationInfo())
{
    TensorShape shape_a_to_use = shape_a;
    if(reinterpret_input_as_3d)
    {
        // Collapse the second and third dimensions if the input is 3D
        shape_a_to_use.collapse(2U, 1U);
    }

    // Create reference
    SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1 };
    SimpleTensor<TW> b{ shape_b, data_type_b, 1, data_type_b == DataType::QSYMM8_PER_CHANNEL ? b_qinfo : QuantizationInfo(1.0f / 255, b_offset) };

    // Fill reference
    fill(a, 0);
    fill(b, 1);
    return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>(a, b, shape_output, a_offset, b_offset);
}
} // namespace

template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
    {
        _target    = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset);
        _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

protected:
    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
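
// Typical instantiation (a sketch, assuming the usual test-suite aliases; the concrete
// Tensor/Accessor/function types live in the NEON/CL test files, not in this header):
//
//   using NEGEMMLowpMatrixMultiplyCoreFixture =
//       GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL,
//                          datasets::SmallGEMMLowpDataset())
//   {
//       validate(Accessor(_target), _reference);
//   }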

template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t>
class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, DataType data_type_b)
    {
        ARM_COMPUTE_EXPECT(output_stage.type != GEMMLowpOutputStageType::NONE, framework::LogLevel::ERRORS);
        DataType data_type_a = data_type_b == DataType::QASYMM8_SIGNED ? DataType::QASYMM8_SIGNED : DataType::QASYMM8;

        if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
        {
            output_stage.is_quantized_per_channel = true;
            const size_t num_channels             = shape_b[0];
            std::vector<float> scales(num_channels);
            std::uniform_real_distribution<> distribution(0, 1);
            library->fill(scales, distribution, 0);
            output_stage.gemmlowp_multipliers.resize(num_channels);
            output_stage.gemmlowp_shifts.resize(num_channels);
            for(size_t i = 0; i < num_channels; ++i)
            {
                quantization::calculate_quantized_multiplier(scales[i], &output_stage.gemmlowp_multipliers[i], &output_stage.gemmlowp_shifts[i]);
            }

            _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
            _target    = compute_target(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
        }
        else
        {
            _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
            _target    = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
        }
    }

protected:
    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage,
                              DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
    {
        return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset,
                                                                                                                                                   output_stage, data_type_a, data_type_b, b_qinfo);
    }

    SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                       GEMMLowpOutputStageInfo output_stage, DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
    {
        SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW>(shape_a, shape_b, shape_output, a_offset, b_offset, data_type_a, data_type_b, b_qinfo);

        TensorShape           bias_shape(shape_b[0]);
        SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
        fill(bias, 2);

        switch(output_stage.type)
        {
            case GEMMLowpOutputStageType::QUANTIZE_DOWN:
                return reference::gemmlowp_quantize_down_scale<int32_t, TW>(output, bias,
                                                                            output_stage.gemmlowp_offset, output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
            case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
                return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TW>(output, bias,
                                                                                          output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
            default:
                ARM_COMPUTE_ERROR("Not Supported!");
        }
    }

    TensorType       _target{};
    SimpleTensor<TI> _reference{};
};
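
// Sketch of how a caller might populate the fused output stage consumed by the fixture above
// (the field values are illustrative, not taken from the test datasets):
//
//   GEMMLowpOutputStageInfo output_stage;
//   output_stage.type               = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
//   output_stage.gemmlowp_offset    = 10;  // requantization offset
//   output_stage.gemmlowp_min_bound = 0;   // clamp range of the quantized result
//   output_stage.gemmlowp_max_bound = 255;
//   // gemmlowp_multipliers/gemmlowp_shifts are derived per channel via
//   // quantization::calculate_quantized_multiplier(), as done in setup() above.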

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType            output_stage;
        GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
        output_stage_info.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
        output_stage_info.gemmlowp_offset         = result_offset;
        output_stage_info.gemmlowp_multiplier     = result_mult_int;
        output_stage_info.gemmlowp_shift          = result_shift;
        output_stage_info.gemmlowp_min_bound      = min;
        output_stage_info.gemmlowp_max_bound      = max;
        output_stage_info.output_data_type        = DataType::QASYMM8;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
        const std::vector<int32_t> result_shift_vec    = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};
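
// For reference, the QUANTIZE_DOWN stage exercised above computes, per element (restated here
// informally from the quantize-down function documentation as a reading aid):
//
//   result = clamp(((input + bias + result_offset) * result_mult_int) >> result_shift, min, max)
//
// with the clamped value then narrowed to the 8-bit output type.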

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);

        // Create and configure function
        FunctionType            output_stage;
        GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
        output_stage_info.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
        output_stage_info.gemmlowp_offset         = result_offset;
        output_stage_info.gemmlowp_multiplier     = result_mult_int;
        output_stage_info.gemmlowp_shift          = result_shift;
        output_stage_info.gemmlowp_min_bound      = min;
        output_stage_info.gemmlowp_max_bound      = max;
        output_stage_info.output_data_type        = DataType::QASYMM8_SIGNED;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
        const std::vector<int32_t> result_shift_vec    = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
    }

    TensorType           _target{};
    SimpleTensor<int8_t> _reference{};
};

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                           bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
    }

    TensorType           _target{};
    SimpleTensor<int8_t> _reference{};
};
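
// The fixed-point requantization exercised by the ...ScaleByFixedPoint fixtures follows the
// well-known gemmlowp scheme (restated informally; the reference implementation is the
// authority): multiply the int32 accumulator by result_fixedpoint_multiplier using a
// rounding-doubling high multiply, arithmetic-shift-right by result_shift with rounding,
// add result_offset_after_shift, then clamp to [min, max] and narrow to the output type.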

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(DataType data_type, TensorShape shape, float result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(data_type, shape, result_real_multiplier, result_offset, min, max, add_bias);
        _reference = compute_reference(shape, result_real_multiplier, result_offset, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Use a narrow range to avoid all the data being clamped
        std::uniform_int_distribution<> distribution(-500, 500);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(DataType data_type, const TensorShape &shape, float result_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, data_type, 1);

        // Create output stage info
        GEMMLowpOutputStageInfo info;
        info.gemmlowp_max_bound       = max;
        info.gemmlowp_min_bound       = min;
        info.gemmlowp_real_multiplier = result_multiplier;
        info.gemmlowp_offset          = result_offset;
        info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT;
        info.output_data_type         = data_type;

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, info);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<T> compute_reference(const TensorShape &shape, float_t result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<float_t> result_float_multiplier_vec = { result_real_multiplier };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, b, result_float_multiplier_vec, result_offset, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, result_float_multiplier_vec, result_offset, min, max);
        }
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
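
// The QUANTIZE_DOWN_FLOAT stage above performs, per element (informally; see the reference
// gemmlowp_quantize_down_scale_by_float for the exact rounding behaviour):
//
//   result = clamp(round((input + bias) * result_real_multiplier) + result_offset, min, max)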

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QSYMM16, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<int16_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<int16_t> _reference{};
};
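
// Note: QSYMM16 is a symmetric quantization scheme (zero offset by definition), which is why
// the fixture above passes a fixed offset of 0 to the reference quantize-down calls and why
// its configure() overload takes no result offset parameter.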

template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
               bool interleave_rhs, DataType data_type)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = m0;
        lhs_info.k0         = k0;
        lhs_info.v0         = v0;
        lhs_info.interleave = interleave_lhs;
        lhs_info.transpose  = false;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = true;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType lhs_reshaped;
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeLHSFunctionType reshape_lhs;
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_lhs.run();
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        switch(data_type)
        {
            case DataType::QASYMM8:
            {
                // Create reference
                SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Create reference
                SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
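
// A reading aid for the reshape parameters used by the fixtures above and below (informal,
// based on the GEMMLHSMatrixInfo/GEMMRHSMatrixInfo documentation): m0/n0 are the number of
// rows/columns processed per block, k0 is the block depth along the accumulation dimension,
// and v0/h0 are the number of vertical/horizontal blocks stored contiguously on the same row
// of the reshaped LHS/RHS matrix; the interleave and transpose flags control how those blocks
// are laid out in the reshaped tensor.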

template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
               bool interleave_lhs, bool interleave_rhs, DataType data_type)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = m0;
        lhs_info.k0         = k0;
        lhs_info.v0         = v0;
        lhs_info.interleave = interleave_lhs;
        lhs_info.transpose  = false;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = true;

        // In case of GEMM3D, m is the product between m_w and m_h
        const unsigned int m = m_w * m_h;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h,
                              DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType lhs_reshaped;
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeLHSFunctionType reshape_lhs;
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_lhs.run();
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape.set(0, rhs_shape[0]);
        dst_shape.set(1, lhs_shape[1] / m_h);
        dst_shape.set(2, m_h);
        dst_shape.set(3, lhs_shape[2]);

        switch(data_type)
        {
            case DataType::QASYMM8:
            {
                // Create reference
                SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Create reference
                SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
               unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = transpose_rhs;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
                              const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        GEMMKernelInfo gemm_info;
        gemm_info.m        = M;
        gemm_info.n        = N;
        gemm_info.k        = K;
        gemm_info.lhs_info = lhs_info;
        gemm_info.rhs_info = rhs_info;
        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        if(data_type == DataType::QASYMM8)
        {
            // Create reference
            SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
            SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

            // Fill reference
            fill(lhs, 0);
            fill(rhs, 1);

            return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
        }
        else
        {
            // Create reference
            SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
            SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

            // Fill reference
            fill(lhs, 0);
            fill(rhs, 1);

            return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
1303
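/** Validation fixture for the GEMMLowp matrix multiply kernels with reshaped-only-RHS, where the GEMM output is reinterpreted as a 3D tensor (GEMM3D) */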
template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
               unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = transpose_rhs;

        // In case of GEMM3D, m is the product between m_w and m_h
        const unsigned int m = m_w * m_h;
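        // e.g. m_w = 7, m_h = 4 run as a plain 2D GEMM with m = 28 rows; compute_reference() later re-splits those rows into m_w x m_h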

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
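                // Between -127 and 126 in order to avoid generating -128, mirroring the unsigned case above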
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
                              const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h, DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        GEMMKernelInfo gemm_info;
        gemm_info.m                   = M;
        gemm_info.n                   = N;
        gemm_info.k                   = K;
        gemm_info.depth_output_gemm3d = m_h;
        gemm_info.lhs_info            = lhs_info;
        gemm_info.rhs_info            = rhs_info;
        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeRHSFunctionType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
        gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        reshape_rhs.run();
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
    {
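        // Re-split the m = m_w * m_h rows of the 2D GEMM result into a [ n, m_w, m_h, batch ] shape to match the 3D output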
        TensorShape dst_shape = lhs_shape;
        dst_shape.set(0, rhs_shape[0]);
        dst_shape.set(1, lhs_shape[1] / m_h);
        dst_shape.set(2, m_h);
        dst_shape.set(3, lhs_shape[2]);

        if(data_type == DataType::QASYMM8)
        {
            // Create reference
            SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
            SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

            // Fill reference
            fill(lhs, 0);
            fill(rhs, 1);

            return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
        }
        else
        {
            // Create reference
            SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
            SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

            // Fill reference
            fill(lhs, 0);
            fill(rhs, 1);

            return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

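/** Validation fixture for the native (non-reshaped) GEMMLowp matrix multiply kernels; QASYMM8 inputs only */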
template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyNativeValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0 = n0;
        rhs_info.k0 = k0;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
        _reference = compute_reference(lhs_shape, rhs_shape);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        GEMMFunctionType gemm;
        gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

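/** Validation fixture for the native GEMMLowp matrix multiply kernels where the output is reinterpreted as a 3D tensor (GEMM3D); QASYMM8 inputs only */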
template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyNative3DValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0 = n0;
        rhs_info.k0 = k0;

        // In case of GEMM3D, m is the product between m_w and m_h
        const unsigned int m = m_w * m_h;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
        _reference = compute_reference(lhs_shape, rhs_shape, m_h);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
        std::uniform_int_distribution<> distribution(1, 254);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        GEMMFunctionType gemm;
        gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));

        ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        gemm.run();

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
    {
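        // Same 4D re-split of the GEMM rows as in the reshaped-only-RHS 3D fixture above: [ n, m_w, m_h, batch ]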
        TensorShape dst_shape = lhs_shape;
        dst_shape.set(0, rhs_shape[0]);
        dst_shape.set(1, lhs_shape[1] / m_h);
        dst_shape.set(2, m_h);
        dst_shape.set(3, lhs_shape[2]);

        // Create reference
        SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
        SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };

        // Fill reference
        fill(lhs, 0);
        fill(rhs, 1);

        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
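
// Minimal usage sketch, for orientation only. The concrete type names below are
// assumptions based on the CL backend; the real instantiations live in
// tests/validation/CL/GEMMLowp.cpp, typically wrapping the kernel in CLSynthetizeFunction:
//
//   using CLGEMMLowpNativeFixture = GEMMLowpMatrixMultiplyNativeValidationFixture<
//       CLTensor, CLAccessor, CLSynthetizeFunction<CLGEMMLowpMatrixMultiplyNativeKernel>>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpNativeFixture, framework::DatasetMode::ALL, /* dataset */)
//   {
//       // The fixture's setup() has already produced _target and _reference
//       validate(CLAccessor(_target), _reference);
//   }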
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */