blob: 5cf210bab43b4f4eec9c5ceea79f6de00080695f [file] [log] [blame]
Pablo Tello299025a2017-09-29 11:30:12 +01001/*
Giorgio Arenab309fc22021-01-05 09:46:16 +00002 * Copyright (c) 2017-2021 Arm Limited.
Pablo Tello299025a2017-09-29 11:30:12 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
25#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
26
Michele Di Giorgiob54ba282020-01-14 15:31:55 +000027#include "arm_compute/core/KernelDescriptors.h"
Pablo Tello299025a2017-09-29 11:30:12 +010028#include "arm_compute/core/TensorShape.h"
29#include "arm_compute/core/Types.h"
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000030#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
Pablo Tello299025a2017-09-29 11:30:12 +010031#include "tests/AssetsLibrary.h"
32#include "tests/Globals.h"
33#include "tests/IAccessor.h"
34#include "tests/framework/Asserts.h"
35#include "tests/framework/Fixture.h"
Pablo Tello299025a2017-09-29 11:30:12 +010036#include "tests/validation/Helpers.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/GEMMLowp.h"
Pablo Tello299025a2017-09-29 11:30:12 +010038
39#include <random>
40
41namespace arm_compute
42{
43namespace test
44{
45namespace validation
46{
George Wort2d7e6832019-02-22 16:37:41 +000047namespace
48{
49template <typename U>
50void fill(U &&tensor, int i)
51{
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000052 switch(tensor.data_type())
53 {
54 case DataType::QSYMM8_PER_CHANNEL:
55 {
56 int min_bound = 128;
57 int max_bound = -127;
58 for(size_t j = 0; j < tensor.quantization_info().scale().size(); j++)
59 {
60 std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
61 if(bounds.first < min_bound)
62 {
63 min_bound = bounds.first;
64 }
65 if(bounds.second > max_bound)
66 {
67 max_bound = bounds.second;
68 }
69 }
70 std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
71 library->fill(tensor, distribution, i);
72 break;
73 }
74 case DataType::QASYMM8:
75 {
76 std::uniform_int_distribution<uint8_t> distribution(1, 254);
77 library->fill(tensor, distribution, i);
78 break;
79 }
80 case DataType::F16:
Giorgio Arena6aeb2172020-12-15 15:45:43 +000081 {
Giorgio Arenaa8e2aeb2021-01-06 11:34:57 +000082 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
Giorgio Arena6aeb2172020-12-15 15:45:43 +000083 library->fill(tensor, distribution, i);
84 break;
85 }
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000086 case DataType::F32:
87 {
Giorgio Arena6aeb2172020-12-15 15:45:43 +000088 std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000089 library->fill(tensor, distribution, i);
90 break;
91 }
92 default:
93 library->fill_tensor_uniform(tensor, i);
94 }
George Wort2d7e6832019-02-22 16:37:41 +000095}
96
/** Run the GEMMLowp function under test and return its output tensor.
 *
 * @param[in] shape_a      Shape of the LHS (input) matrix.
 * @param[in] shape_b      Shape of the RHS (weights) matrix.
 * @param[in] shape_output Shape of the output matrix.
 * @param[in] a_offset     Quantization offset of the LHS matrix.
 * @param[in] b_offset     Quantization offset of the RHS matrix (ignored for per-channel RHS).
 * @param[in] output_stage Optional fused output stage; NONE keeps raw S32 accumulators.
 * @param[in] data_type_a  Data type of the LHS matrix (and of the re-quantized output).
 * @param[in] data_type_b  Data type of the RHS matrix.
 * @param[in] b_qinfo      Per-channel quantization info, used only when the RHS is QSYMM8_PER_CHANNEL.
 *
 * @return The computed output tensor.
 *
 * Note the strict ordering below: configure() must see resizable (unallocated)
 * tensors, allocation happens afterwards, and filling requires allocated memory.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false>
TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                   GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
                                   QuantizationInfo b_qinfo = QuantizationInfo())
{
    // Without a fused output stage the core produces raw S32 accumulators;
    // with one, the output is re-quantized back to the input data type.
    DataType data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;

    // Create tensors
    TensorType a = create_tensor<TensorType>(shape_a, data_type_a, 1);
    TensorType b = create_tensor<TensorType>(shape_b, data_type_b, 1); // NOTE(review): passing data_type_output here caused pre-output-stage mismatches - to be investigated
    TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));

    // Per-channel RHS carries its own quantization info; otherwise use the
    // uniform scale/offset pair.
    if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
    {
        b.info()->set_quantization_info(b_qinfo);
    }
    else
    {
        b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
    }
    TensorType bias;
    if(is_fused)
    {
        // Bias has one S32 element per output column.
        TensorShape bias_shape(shape_b[0]);
        bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
    }

    // Create and configure function
    // The GEMMInfo includes the values of the depth in case of reinterpreted 3d input/output
    FunctionType gemmlowp;
    gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, output_stage));

    // configure() must not have allocated anything yet.
    ARM_COMPUTE_ASSERT(a.info()->is_resizable());
    ARM_COMPUTE_ASSERT(b.info()->is_resizable());
    ARM_COMPUTE_ASSERT(output.info()->is_resizable());

    // Exercise the non-contiguous-row code paths as well.
    add_padding_x({ &a, &b, &output });

    // Allocate tensors
    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();

    ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
    ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
    ARM_COMPUTE_ASSERT(!output.info()->is_resizable());

    // Fill tensors (fixed seeds keep target and reference inputs identical)
    fill(AccessorType(a), 0);
    fill(AccessorType(b), 1);

    if(is_fused)
    {
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        bias.allocator()->allocate();
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        fill(AccessorType(bias), 2);
    }
    // Compute GEMM function
    gemmlowp.run();
    return output;
}
161
Manuel Bottini959c26d2019-12-02 16:22:35 +0000162template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t>
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000163SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
Manuel Bottini959c26d2019-12-02 16:22:35 +0000164 DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, QuantizationInfo b_qinfo = QuantizationInfo())
George Wort2d7e6832019-02-22 16:37:41 +0000165{
166 TensorShape shape_a_to_use = shape_a;
167 if(reinterpret_input_as_3d)
168 {
169 // Collapse the second and third dimension if the input is 3D
170 shape_a_to_use.collapse(2U, 1U);
171 }
172
173 // Create reference
Manuel Bottini959c26d2019-12-02 16:22:35 +0000174 SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1 };
175 SimpleTensor<TW> b{ shape_b, data_type_b, 1, data_type_b == DataType::QSYMM8_PER_CHANNEL ? b_qinfo : QuantizationInfo(1.0f / 255, b_offset) };
George Wort2d7e6832019-02-22 16:37:41 +0000176
177 // Fill reference
178 fill(a, 0);
179 fill(b, 1);
Manuel Bottini959c26d2019-12-02 16:22:35 +0000180 return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>(a, b, shape_output, a_offset, b_offset);
George Wort2d7e6832019-02-22 16:37:41 +0000181}
182}
183
/** Fixture validating the GEMMLowp matrix-multiply core (no fused output stage).
 *
 * Runs the accelerated function and the plain C++ reference on identically
 * seeded inputs; the test case then compares @ref _target against
 * @ref _reference. Both paths produce raw S32 accumulators.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
{
public:
    // Entry point called by the test framework with the dataset parameters.
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
    {
        _target    = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset);
        _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

protected:
    // Forward to the shared helper that runs the function under test.
    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    // Forward to the shared helper that computes the expected result.
    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    TensorType            _target{};    // output of the function under test
    SimpleTensor<int32_t> _reference{}; // expected output from the reference
};
209
Manuel Bottini959c26d2019-12-02 16:22:35 +0000210template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t>
George Wort2d7e6832019-02-22 16:37:41 +0000211class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture
212{
213public:
214 template <typename...>
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000215 void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, DataType data_type_b)
George Wort2d7e6832019-02-22 16:37:41 +0000216 {
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100217 ARM_COMPUTE_ASSERT(output_stage.type != GEMMLowpOutputStageType::NONE);
Manuel Bottini959c26d2019-12-02 16:22:35 +0000218 DataType data_type_a = data_type_b == DataType::QASYMM8_SIGNED ? DataType::QASYMM8_SIGNED : DataType::QASYMM8;
219
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000220 if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
221 {
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000222 output_stage.is_quantized_per_channel = true;
223 const size_t num_channels = shape_b[0];
224 std::vector<float> scales(num_channels);
225 std::uniform_real_distribution<float> distribution(0.f, 1.f);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000226 library->fill(scales, distribution, 0);
227 output_stage.gemmlowp_multipliers.resize(num_channels);
228 output_stage.gemmlowp_shifts.resize(num_channels);
229 for(size_t i = 0; i < num_channels; ++i)
230 {
Michele Di Giorgiof29d1b72019-10-29 10:58:13 +0000231 quantization::calculate_quantized_multiplier(scales[i], &output_stage.gemmlowp_multipliers[i], &output_stage.gemmlowp_shifts[i]);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000232 }
233
Manuel Bottini959c26d2019-12-02 16:22:35 +0000234 _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
235 _target = compute_target(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000236 }
237 else
238 {
Manuel Bottini959c26d2019-12-02 16:22:35 +0000239 _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
240 _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000241 }
George Wort2d7e6832019-02-22 16:37:41 +0000242 }
243
244protected:
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000245 TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage,
Manuel Bottini959c26d2019-12-02 16:22:35 +0000246 DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
George Wort2d7e6832019-02-22 16:37:41 +0000247 {
248 return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset,
Manuel Bottini959c26d2019-12-02 16:22:35 +0000249 output_stage, data_type_a, data_type_b, b_qinfo);
George Wort2d7e6832019-02-22 16:37:41 +0000250 }
251
Manuel Bottini959c26d2019-12-02 16:22:35 +0000252 SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
253 GEMMLowpOutputStageInfo output_stage, DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
George Wort2d7e6832019-02-22 16:37:41 +0000254 {
Manuel Bottini959c26d2019-12-02 16:22:35 +0000255 SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW>(shape_a, shape_b, shape_output, a_offset, b_offset, data_type_a, data_type_b, b_qinfo);
George Wort2d7e6832019-02-22 16:37:41 +0000256
257 TensorShape bias_shape(shape_b[0]);
258 SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
259 fill(bias, 2);
260
261 switch(output_stage.type)
262 {
263 case GEMMLowpOutputStageType::QUANTIZE_DOWN:
Manuel Bottini959c26d2019-12-02 16:22:35 +0000264 return reference::gemmlowp_quantize_down_scale<int32_t, TW>(output, bias,
265 output_stage.gemmlowp_offset, output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
George Wort2d7e6832019-02-22 16:37:41 +0000266 break;
267 case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
Manuel Bottini959c26d2019-12-02 16:22:35 +0000268 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TW>(output, bias,
269 output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
George Wort2d7e6832019-02-22 16:37:41 +0000270 break;
271 default:
272 ARM_COMPUTE_ERROR("Not Supported!");
273 }
274 }
275
Manuel Bottini959c26d2019-12-02 16:22:35 +0000276 TensorType _target{};
277 SimpleTensor<TI> _reference{};
George Wort2d7e6832019-02-22 16:37:41 +0000278};
279
Gian Marcoe75a02b2017-11-08 12:24:09 +0000280template <typename TensorType, typename AccessorType, typename FunctionType>
281class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
282{
283public:
284 template <typename...>
Gian Marco6b77e912017-11-17 09:27:57 +0000285 void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000286 {
Gian Marco6b77e912017-11-17 09:27:57 +0000287 _target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
288 _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000289 }
290
291protected:
292 template <typename U>
293 void fill(U &&tensor, int i)
294 {
295 std::uniform_int_distribution<> distribution(-6000, 6000);
296 library->fill(tensor, distribution, i);
297 }
298
Gian Marco6b77e912017-11-17 09:27:57 +0000299 TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000300 {
Gian Marco6b77e912017-11-17 09:27:57 +0000301 TensorShape shape_bias(shape[0]);
302
Gian Marcoe75a02b2017-11-08 12:24:09 +0000303 // Create tensors
304 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
Gian Marco6b77e912017-11-17 09:27:57 +0000305 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
306 TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000307
308 // Create and configure function
Luca Foschiani4b869532020-02-13 15:07:36 +0000309 FunctionType output_stage;
310 GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
311 output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
312 output_stage_info.gemmlowp_offset = result_offset;
313 output_stage_info.gemmlowp_multiplier = result_mult_int;
314 output_stage_info.gemmlowp_shift = result_shift;
315 output_stage_info.gemmlowp_min_bound = min;
316 output_stage_info.gemmlowp_max_bound = max;
317 output_stage_info.output_data_type = DataType::QASYMM8;
318 output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000319
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100320 ARM_COMPUTE_ASSERT(a.info()->is_resizable());
321 ARM_COMPUTE_ASSERT(c.info()->is_resizable());
Gian Marcoe75a02b2017-11-08 12:24:09 +0000322
323 // Allocate tensors
324 a.allocator()->allocate();
Gian Marco6b77e912017-11-17 09:27:57 +0000325 c.allocator()->allocate();
Gian Marcoe75a02b2017-11-08 12:24:09 +0000326
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100327 ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
328 ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
Gian Marcoe75a02b2017-11-08 12:24:09 +0000329
Gian Marco6b77e912017-11-17 09:27:57 +0000330 // Fill tensor
Gian Marcoe75a02b2017-11-08 12:24:09 +0000331 fill(AccessorType(a), 0);
332
Gian Marco6b77e912017-11-17 09:27:57 +0000333 if(add_bias)
334 {
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100335 ARM_COMPUTE_ASSERT(b.info()->is_resizable());
Gian Marco6b77e912017-11-17 09:27:57 +0000336
337 // Allocate bias tensor
338 b.allocator()->allocate();
339
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100340 ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
Gian Marco6b77e912017-11-17 09:27:57 +0000341
342 // Fill tensor
343 fill(AccessorType(b), 1);
344 }
345
Gian Marcoe75a02b2017-11-08 12:24:09 +0000346 // Compute GEMM function
347 output_stage.run();
Gian Marco6b77e912017-11-17 09:27:57 +0000348 return c;
Gian Marcoe75a02b2017-11-08 12:24:09 +0000349 }
350
Gian Marco6b77e912017-11-17 09:27:57 +0000351 SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000352 {
353 // Create reference
Gian Marco6b77e912017-11-17 09:27:57 +0000354 TensorShape shape_bias(shape[0]);
355
Gian Marcoe75a02b2017-11-08 12:24:09 +0000356 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
Gian Marco6b77e912017-11-17 09:27:57 +0000357 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
Gian Marcoe75a02b2017-11-08 12:24:09 +0000358
359 // Fill reference
360 fill(a, 0);
361
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000362 const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
363 const std::vector<int32_t> result_shift_vec = { result_shift };
364
Gian Marco6b77e912017-11-17 09:27:57 +0000365 if(add_bias)
366 {
367 // Fill bias
368 fill(b, 1);
369
Manuel Bottini959c26d2019-12-02 16:22:35 +0000370 return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
Gian Marco6b77e912017-11-17 09:27:57 +0000371 }
372 else
373 {
Manuel Bottini959c26d2019-12-02 16:22:35 +0000374 return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
Gian Marco6b77e912017-11-17 09:27:57 +0000375 }
Gian Marcoe75a02b2017-11-08 12:24:09 +0000376 }
377
378 TensorType _target{};
379 SimpleTensor<uint8_t> _reference{};
380};
Gian Marco58c57942017-11-28 09:10:03 +0000381
382template <typename TensorType, typename AccessorType, typename FunctionType>
Luca Foschiani4b869532020-02-13 15:07:36 +0000383class GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture : public framework::Fixture
384{
385public:
386 template <typename...>
387 void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
388 {
389 _target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
390 _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
391 }
392
393protected:
394 template <typename U>
395 void fill(U &&tensor, int i)
396 {
397 std::uniform_int_distribution<> distribution(-6000, 6000);
398 library->fill(tensor, distribution, i);
399 }
400
401 TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
402 {
403 TensorShape shape_bias(shape[0]);
404
405 // Create tensors
406 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
407 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
408 TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);
409
410 // Create and configure function
411 FunctionType output_stage;
412 GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
413 output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
414 output_stage_info.gemmlowp_offset = result_offset;
415 output_stage_info.gemmlowp_multiplier = result_mult_int;
416 output_stage_info.gemmlowp_shift = result_shift;
417 output_stage_info.gemmlowp_min_bound = min;
418 output_stage_info.gemmlowp_max_bound = max;
419 output_stage_info.output_data_type = DataType::QASYMM8_SIGNED;
420 output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);
421
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100422 ARM_COMPUTE_ASSERT(a.info()->is_resizable());
423 ARM_COMPUTE_ASSERT(c.info()->is_resizable());
Luca Foschiani4b869532020-02-13 15:07:36 +0000424
425 // Allocate tensors
426 a.allocator()->allocate();
427 c.allocator()->allocate();
428
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100429 ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
430 ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
Luca Foschiani4b869532020-02-13 15:07:36 +0000431
432 // Fill tensor
433 fill(AccessorType(a), 0);
434
435 if(add_bias)
436 {
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100437 ARM_COMPUTE_ASSERT(b.info()->is_resizable());
Luca Foschiani4b869532020-02-13 15:07:36 +0000438
439 // Allocate bias tensor
440 b.allocator()->allocate();
441
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100442 ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
Luca Foschiani4b869532020-02-13 15:07:36 +0000443
444 // Fill tensor
445 fill(AccessorType(b), 1);
446 }
447
448 // Compute GEMM function
449 output_stage.run();
450 return c;
451 }
452
453 SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
454 {
455 // Create reference
456 TensorShape shape_bias(shape[0]);
457
458 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
459 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
460
461 // Fill reference
462 fill(a, 0);
463
464 const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
465 const std::vector<int32_t> result_shift_vec = { result_shift };
466
467 if(add_bias)
468 {
469 // Fill bias
470 fill(b, 1);
471
472 return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
473 }
474 else
475 {
476 return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
477 }
478 }
479
480 TensorType _target{};
481 SimpleTensor<int8_t> _reference{};
482};
483
484template <typename TensorType, typename AccessorType, typename FunctionType>
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000485class GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture : public framework::Fixture
486{
487public:
488 template <typename...>
489 void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
490 {
491 _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
492 _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
493 }
494
495protected:
496 template <typename U>
497 void fill(U &&tensor, int i)
498 {
499 std::uniform_int_distribution<> distribution(-6000, 6000);
500 library->fill(tensor, distribution, i);
501 }
502
503 TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
504 {
505 TensorShape shape_bias(shape[0]);
506
507 // Create tensors
508 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
509 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
510 TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);
511
512 // Create and configure function
513 FunctionType output_stage;
514 output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
515
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100516 ARM_COMPUTE_ASSERT(a.info()->is_resizable());
517 ARM_COMPUTE_ASSERT(c.info()->is_resizable());
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000518
519 // Allocate tensors
520 a.allocator()->allocate();
521 c.allocator()->allocate();
522
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100523 ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
524 ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000525
526 // Fill tensor
527 fill(AccessorType(a), 0);
528
529 if(add_bias)
530 {
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100531 ARM_COMPUTE_ASSERT(b.info()->is_resizable());
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000532
533 // Allocate bias tensor
534 b.allocator()->allocate();
535
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100536 ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000537
538 // Fill tensor
539 fill(AccessorType(b), 1);
540 }
541
542 // Compute GEMM function
543 output_stage.run();
544 return c;
545 }
546
547 SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
548 bool add_bias)
549 {
550 // Create reference
551 TensorShape shape_bias(shape[0]);
552
553 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
554 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
555
556 // Fill reference
557 fill(a, 0);
558
559 const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
560 const std::vector<int32_t> result_shift_vec = { result_shift };
561
562 if(add_bias)
563 {
564 // Fill bias
565 fill(b, 1);
566
567 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
568 }
569 else
570 {
571 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
572 }
573 }
574
575 TensorType _target{};
576 SimpleTensor<int8_t> _reference{};
577};
578
579template <typename TensorType, typename AccessorType, typename FunctionType>
Gian Marco58c57942017-11-28 09:10:03 +0000580class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture
581{
582public:
583 template <typename...>
584 void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
585 {
586 _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
587 _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
588 }
589
590protected:
591 template <typename U>
592 void fill(U &&tensor, int i)
593 {
594 std::uniform_int_distribution<> distribution(-6000, 6000);
595 library->fill(tensor, distribution, i);
596 }
597
598 TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
599 {
600 TensorShape shape_bias(shape[0]);
601
602 // Create tensors
603 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
604 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
605 TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);
606
607 // Create and configure function
608 FunctionType output_stage;
609 output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
610
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100611 ARM_COMPUTE_ASSERT(a.info()->is_resizable());
612 ARM_COMPUTE_ASSERT(c.info()->is_resizable());
Gian Marco58c57942017-11-28 09:10:03 +0000613
614 // Allocate tensors
615 a.allocator()->allocate();
616 c.allocator()->allocate();
617
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100618 ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
619 ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
Gian Marco58c57942017-11-28 09:10:03 +0000620
621 // Fill tensor
622 fill(AccessorType(a), 0);
623
624 if(add_bias)
625 {
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100626 ARM_COMPUTE_ASSERT(b.info()->is_resizable());
Gian Marco58c57942017-11-28 09:10:03 +0000627
628 // Allocate bias tensor
629 b.allocator()->allocate();
630
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100631 ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
Gian Marco58c57942017-11-28 09:10:03 +0000632
633 // Fill tensor
634 fill(AccessorType(b), 1);
635 }
636
637 // Compute GEMM function
638 output_stage.run();
639 return c;
640 }
641
642 SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
643 bool add_bias)
644 {
645 // Create reference
646 TensorShape shape_bias(shape[0]);
647
648 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
649 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
650
651 // Fill reference
652 fill(a, 0);
653
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000654 const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
655 const std::vector<int32_t> result_shift_vec = { result_shift };
656
Gian Marco58c57942017-11-28 09:10:03 +0000657 if(add_bias)
658 {
659 // Fill bias
660 fill(b, 1);
661
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000662 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
Gian Marco58c57942017-11-28 09:10:03 +0000663 }
664 else
665 {
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000666 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
Gian Marco58c57942017-11-28 09:10:03 +0000667 }
668 }
669
670 TensorType _target{};
671 SimpleTensor<uint8_t> _reference{};
672};
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000673
Sheri Zhang1b14c752020-03-09 14:29:52 +0000674template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
675class GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture : public framework::Fixture
676{
677public:
678 template <typename...>
679 void setup(DataType data_type, TensorShape shape, float result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
680 {
681 _target = compute_target(data_type, shape, result_real_multiplier, result_offset, min, max, add_bias);
682 _reference = compute_reference(shape, result_real_multiplier, result_offset, min, max, add_bias);
683 }
684
685protected:
686 template <typename U>
687 void fill(U &&tensor, int i)
688 {
689 // To avoid data all being clampped
690 std::uniform_int_distribution<> distribution(-500, 500);
691 library->fill(tensor, distribution, i);
692 }
693
694 TensorType compute_target(DataType data_type, const TensorShape &shape, float result_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
695 {
696 TensorShape shape_bias(shape[0]);
697
698 // Create tensors
699 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
700 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
701 TensorType c = create_tensor<TensorType>(shape, data_type, 1);
702
703 // create output stage info
704 GEMMLowpOutputStageInfo info;
705 info.gemmlowp_max_bound = max;
706 info.gemmlowp_min_bound = min;
707 info.gemmlowp_real_multiplier = result_multiplier;
708 info.gemmlowp_offset = result_offset;
709 info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT;
710 info.output_data_type = data_type;
711
712 // Create and configure function
713 FunctionType output_stage;
714 output_stage.configure(&a, add_bias ? &b : nullptr, &c, info);
715
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100716 ARM_COMPUTE_ASSERT(a.info()->is_resizable());
717 ARM_COMPUTE_ASSERT(c.info()->is_resizable());
Sheri Zhang1b14c752020-03-09 14:29:52 +0000718
719 // Allocate tensors
720 a.allocator()->allocate();
721 c.allocator()->allocate();
722
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100723 ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
724 ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
Sheri Zhang1b14c752020-03-09 14:29:52 +0000725
726 // Fill tensor
727 fill(AccessorType(a), 0);
728
729 if(add_bias)
730 {
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100731 ARM_COMPUTE_ASSERT(b.info()->is_resizable());
Sheri Zhang1b14c752020-03-09 14:29:52 +0000732
733 // Allocate bias tensor
734 b.allocator()->allocate();
735
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100736 ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
Sheri Zhang1b14c752020-03-09 14:29:52 +0000737
738 // Fill tensor
739 fill(AccessorType(b), 1);
740 }
741
742 // Compute GEMM function
743 output_stage.run();
744 return c;
745 }
746
747 SimpleTensor<T> compute_reference(const TensorShape &shape, float_t result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
748 {
749 // Create reference
750 TensorShape shape_bias(shape[0]);
751
752 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
753 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
754
755 // Fill reference
756 fill(a, 0);
757
758 const std::vector<float_t> result_float_multiplier_vec = { result_real_multiplier };
759
760 if(add_bias)
761 {
762 // Fill bias
763 fill(b, 1);
764
765 return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, b, result_float_multiplier_vec, result_offset, min, max);
766 }
767 else
768 {
769 return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, result_float_multiplier_vec, result_offset, min, max);
770 }
771 }
772
773 TensorType _target{};
774 SimpleTensor<T> _reference{};
775};
776
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100777template <typename TensorType, typename AccessorType, typename FunctionType>
778class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
779{
780public:
781 template <typename...>
782 void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
783 {
784 _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
785 _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
786 }
787
788protected:
789 template <typename U>
790 void fill(U &&tensor, int i)
791 {
792 std::uniform_int_distribution<> distribution(-6000, 6000);
793 library->fill(tensor, distribution, i);
794 }
795
796 TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
797 {
798 TensorShape shape_bias(shape[0]);
799
800 // Create tensors
801 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
802 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
803 TensorType c = create_tensor<TensorType>(shape, DataType::QSYMM16, 1);
804
805 // Create and configure function
806 FunctionType output_stage;
807 output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);
808
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100809 ARM_COMPUTE_ASSERT(a.info()->is_resizable());
810 ARM_COMPUTE_ASSERT(c.info()->is_resizable());
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100811
812 // Allocate tensors
813 a.allocator()->allocate();
814 c.allocator()->allocate();
815
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100816 ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
817 ARM_COMPUTE_ASSERT(!c.info()->is_resizable());
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100818
819 // Fill tensor
820 fill(AccessorType(a), 0);
821
822 if(add_bias)
823 {
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100824 ARM_COMPUTE_ASSERT(b.info()->is_resizable());
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100825
826 // Allocate bias tensor
827 b.allocator()->allocate();
828
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +0100829 ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100830
831 // Fill tensor
832 fill(AccessorType(b), 1);
833 }
834
835 // Compute GEMM function
836 output_stage.run();
837 return c;
838 }
839
840 SimpleTensor<int16_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t min, int32_t max,
841 bool add_bias)
842 {
843 // Create reference
844 TensorShape shape_bias(shape[0]);
845
846 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
847 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
848
849 // Fill reference
850 fill(a, 0);
851
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000852 const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
853 const std::vector<int32_t> result_shift_vec = { result_shift };
854
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100855 if(add_bias)
856 {
857 // Fill bias
858 fill(b, 1);
859
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000860 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100861 }
862 else
863 {
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000864 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100865 }
866 }
867
868 TensorType _target{};
869 SimpleTensor<int16_t> _reference{};
870};
871
// Validation fixture for low-precision GEMM where BOTH the LHS and the RHS
// matrices are reshaped (blocked) before the matrix multiplication.
// The target runs reshape(LHS) + reshape(RHS) + GEMM; the reference computes
// the equivalent core GEMM on the original (un-reshaped) matrices.
template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
    // m/n/k: GEMM dimensions; m0/n0/k0/v0/h0: blocking parameters for the reshapes.
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
               bool interleave_rhs, DataType data_type)
    {
        // Blocking for the LHS reshape; the LHS is never transposed in this fixture.
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = m0;
        lhs_info.k0         = k0;
        lhs_info.v0         = v0;
        lhs_info.interleave = interleave_lhs;
        lhs_info.transpose  = false;

        // Blocking for the RHS reshape; the RHS is always transposed in this fixture.
        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = true;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, data_type);
    }

protected:
    // Fill a tensor with deterministic pseudo-random values; i selects the seed offset.
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    // Run reshape(LHS) + reshape(RHS) + GEMM on the target backend; returns the S32 result.
    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType lhs_reshaped;
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function.
        // The reshape operators are configured on the tensor infos (operator-style API),
        // and fed at run time through ITensorPacks below.
        ReshapeLHSOperatorType reshape_lhs;
        ReshapeRHSOperatorType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
        reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
        gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));

        ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());

        // NOTE(review): x-padding is added before allocation — presumably to
        // exercise kernels on non-dense row strides; confirm against add_padding_x.
        add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors (seed offsets 0/1 match the reference path)
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM: run both reshapes, then the matrix multiplication
        ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
        reshape_lhs.run(reshape_lhs_pack);
        ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
        reshape_rhs.run(reshape_rhs_pack);
        gemm.run();

        return dst;
    }

    // Compute the C++ reference core GEMM (S32 accumulators) on the same data.
    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
    {
        // Output is N x M (x batches): width from RHS, height from LHS.
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        switch(data_type)
        {
            case DataType::QASYMM8:
            {
                // Create reference
                SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                // Zero offsets for both operands in this fixture.
                return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Create reference
                SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
1021
// Same as GEMMLowpMatrixMultiplyReshapedValidationFixture, but for the GEMM3D
// case: the M dimension is the product of two output spatial dimensions
// (m_w * m_h) and the destination is re-interpreted as a 4D tensor.
template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
               bool interleave_lhs, bool interleave_rhs, DataType data_type)
    {
        // Blocking for the LHS reshape; the LHS is never transposed in this fixture.
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = m0;
        lhs_info.k0         = k0;
        lhs_info.v0         = v0;
        lhs_info.interleave = interleave_lhs;
        lhs_info.transpose  = false;

        // Blocking for the RHS reshape; the RHS is always transposed in this fixture.
        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = true;

        // In case of GEMM3D, m is the product between m_w and m_h
        const unsigned int m = m_w * m_h;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
    }

protected:
    // Fill a tensor with deterministic pseudo-random values; i selects the seed offset.
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    // Run reshape(LHS) + reshape(RHS) + GEMM3D on the target backend; returns the S32 result.
    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h,
                              DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType lhs_reshaped;
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function.
        // The reshape operators work on tensor infos and are fed via ITensorPacks;
        // the trailing m_h in GEMMReshapeInfo enables the 3D output re-interpretation.
        ReshapeLHSOperatorType reshape_lhs;
        ReshapeRHSOperatorType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
        reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
        gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));

        ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());

        // NOTE(review): x-padding is added before allocation — presumably to
        // exercise kernels on non-dense row strides; confirm against add_padding_x.
        add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors (seed offsets 0/1 match the reference path)
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM: run both reshapes, then the matrix multiplication
        ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
        reshape_lhs.run(reshape_lhs_pack);
        ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
        reshape_rhs.run(reshape_rhs_pack);
        gemm.run();

        return dst;
    }

    // Compute the C++ reference core GEMM; the output is reshaped to 4D with the
    // M dimension split into (M / m_h) x m_h to match the GEMM3D target layout.
    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape.set(0, rhs_shape[0]);
        dst_shape.set(1, lhs_shape[1] / m_h);
        dst_shape.set(2, m_h);
        dst_shape.set(3, lhs_shape[2]);

        switch(data_type)
        {
            case DataType::QASYMM8:
            {
                // Create reference
                SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                // Zero offsets for both operands in this fixture.
                return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Create reference
                SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001177
// Validation fixture for low-precision GEMM where ONLY the RHS matrix is
// reshaped before the multiplication; the LHS is consumed as-is. The kernel
// is configured through a GEMMKernelInfo descriptor instead of GEMMReshapeInfo.
template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
               unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
    {
        // The LHS is not reshaped: lhs_info only carries the block sizes the kernel needs.
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0 = m0;
        lhs_info.k0 = k0;

        // Blocking parameters for the RHS reshape.
        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = transpose_rhs;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, data_type);
    }

protected:
    // Fill a tensor with deterministic pseudo-random values; i selects the seed offset.
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    // Run reshape(RHS) + GEMM on the target backend; returns the S32 result.
    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
                              const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // Describe the kernel: GEMM dimensions plus the LHS/RHS blocking information.
        GEMMKernelInfo gemm_info;
        gemm_info.m        = M;
        gemm_info.n        = N;
        gemm_info.k        = K;
        gemm_info.lhs_info = lhs_info;
        gemm_info.rhs_info = rhs_info;
        // The output tensor will be auto-initialized within the function

        // Create and configure function.
        // The reshape operator works on tensor infos and is fed via an ITensorPack.
        ReshapeRHSOperatorType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
        gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);

        ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());

        // NOTE(review): x-padding is added before allocation — presumably to
        // exercise kernels on non-dense row strides; confirm against add_padding_x.
        add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors (seed offsets 0/1 match the reference path)
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM: reshape the RHS, then run the matrix multiplication
        ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
        reshape_rhs.run(reshape_rhs_pack);
        gemm.run();

        return dst;
    }

    // Compute the C++ reference core GEMM (S32 accumulators) on the same data.
    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
    {
        // Output is N x M (x batches): width from RHS, height from LHS.
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        if(data_type == DataType::QASYMM8)
        {
            // Create reference
            SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
            SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

            // Fill reference
            fill(lhs, 0);
            fill(rhs, 1);

            // Zero offsets for both operands in this fixture.
            return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
        }
        else
        {
            // Create reference
            SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
            SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

            // Fill reference
            fill(lhs, 0);
            fill(rhs, 1);

            return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};
1319
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001320template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001321class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
1322{
1323public:
1324 template <typename...>
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001325 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
1326 unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001327 {
1328 GEMMLHSMatrixInfo lhs_info;
1329 lhs_info.m0 = m0;
1330 lhs_info.k0 = k0;
1331
1332 GEMMRHSMatrixInfo rhs_info;
1333 rhs_info.n0 = n0;
1334 rhs_info.k0 = k0;
1335 rhs_info.h0 = h0;
1336 rhs_info.interleave = interleave_rhs;
1337 rhs_info.transpose = transpose_rhs;
1338
1339 // In case of GEMM3D, m is the product between m_w and m_h
1340 const unsigned int m = m_w * m_h;
1341
1342 // Set the tensor shapes for LHS and RHS matrices
1343 const TensorShape lhs_shape(k, m, batch_size);
1344 const TensorShape rhs_shape(n, k, batch_size);
1345
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001346 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
1347 _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001348 }
1349
1350protected:
1351 template <typename U>
1352 void fill(U &&tensor, int i)
1353 {
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001354 switch(tensor.data_type())
1355 {
1356 case DataType::QASYMM8:
1357 {
1358 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1359 std::uniform_int_distribution<> distribution(1, 254);
1360 library->fill(tensor, distribution, i);
1361 }
1362 break;
1363 case DataType::QASYMM8_SIGNED:
1364 {
1365 std::uniform_int_distribution<> distribution(-127, 126);
1366 library->fill(tensor, distribution, i);
1367 }
1368 break;
1369 default:
1370 ARM_COMPUTE_ERROR("Unsupported data type");
1371 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001372 }
1373
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001374 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
1375 const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001376 {
1377 // Create tensors
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001378 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
1379 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001380 TensorType rhs_reshaped;
1381 TensorType dst;
1382
1383 const unsigned int M = lhs_shape[1];
1384 const unsigned int N = rhs_shape[0];
1385 const unsigned int K = lhs_shape[0];
1386
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001387 GEMMKernelInfo gemm_info;
1388 gemm_info.m = M;
1389 gemm_info.n = N;
1390 gemm_info.k = K;
1391 gemm_info.depth_output_gemm3d = m_h;
1392 gemm_info.lhs_info = lhs_info;
1393 gemm_info.rhs_info = rhs_info;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001394 // The output tensor will be auto-initialized within the function
1395
1396 // Create and configure function
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001397 ReshapeRHSOperatorType reshape_rhs;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001398 GEMMFunctionType gemm;
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001399 reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001400 gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001401
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001402 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1403 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001404
Giorgio Arena63825e82021-03-25 14:54:50 +00001405 add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });
1406
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001407 // Allocate tensors
1408 lhs.allocator()->allocate();
1409 rhs.allocator()->allocate();
1410 rhs_reshaped.allocator()->allocate();
1411 dst.allocator()->allocate();
1412
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001413 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1414 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1415 ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
1416 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001417
1418 // Fill tensors
1419 fill(AccessorType(lhs), 0);
1420 fill(AccessorType(rhs), 1);
1421
1422 // Compute GEMM
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001423 ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
1424 reshape_rhs.run(reshape_rhs_pack);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001425 gemm.run();
1426
1427 return dst;
1428 }
1429
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001430 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001431 {
1432 TensorShape dst_shape = lhs_shape;
1433 dst_shape.set(0, rhs_shape[0]);
1434 dst_shape.set(1, lhs_shape[1] / m_h);
1435 dst_shape.set(2, m_h);
1436 dst_shape.set(3, lhs_shape[2]);
1437
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001438 if(data_type == DataType::QASYMM8)
1439 {
1440 // Create reference
1441 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
1442 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001443
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001444 // Fill reference
1445 fill(lhs, 0);
1446 fill(rhs, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001447
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001448 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1449 }
1450 else
1451 {
1452 // Create reference
1453 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1454 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1455
1456 // Fill reference
1457 fill(lhs, 0);
1458 fill(rhs, 1);
1459
1460 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1461 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001462 }
1463
1464 TensorType _target{};
1465 SimpleTensor<int32_t> _reference{};
1466};
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001467
1468template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
1469class GEMMLowpMatrixMultiplyNativeValidationFixture : public framework::Fixture
1470{
1471public:
1472 template <typename...>
1473 void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
1474 {
1475 GEMMLHSMatrixInfo lhs_info;
1476 lhs_info.m0 = m0;
1477 lhs_info.k0 = k0;
1478
1479 GEMMRHSMatrixInfo rhs_info;
1480 rhs_info.n0 = n0;
1481 rhs_info.k0 = k0;
1482
1483 // Set the tensor shapes for LHS and RHS matrices
1484 const TensorShape lhs_shape(k, m, batch_size);
1485 const TensorShape rhs_shape(n, k, batch_size);
1486
1487 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
1488 _reference = compute_reference(lhs_shape, rhs_shape);
1489 }
1490
1491protected:
1492 template <typename U>
1493 void fill(U &&tensor, int i)
1494 {
1495 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1496 std::uniform_int_distribution<> distribution(1, 254);
1497 library->fill(tensor, distribution, i);
1498 }
1499
1500 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
1501 {
1502 // Create tensors
1503 TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
1504 TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
1505 TensorType dst;
1506
1507 const unsigned int M = lhs_shape[1];
1508 const unsigned int N = rhs_shape[0];
1509 const unsigned int K = lhs_shape[0];
1510
1511 // The output tensor will be auto-initialized within the function
1512
1513 // Create and configure function
1514 GEMMFunctionType gemm;
1515 gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
1516
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001517 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1518 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001519
Giorgio Arena63825e82021-03-25 14:54:50 +00001520 add_padding_x({ &lhs, &rhs, &dst });
1521
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001522 // Allocate tensors
1523 lhs.allocator()->allocate();
1524 rhs.allocator()->allocate();
1525 dst.allocator()->allocate();
1526
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001527 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1528 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1529 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001530
1531 // Fill tensors
1532 fill(AccessorType(lhs), 0);
1533 fill(AccessorType(rhs), 1);
1534
1535 // Compute GEMM
1536 gemm.run();
1537
1538 return dst;
1539 }
1540
1541 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
1542 {
1543 TensorShape dst_shape = lhs_shape;
1544 dst_shape[0] = rhs_shape[0];
1545 dst_shape[1] = lhs_shape[1];
1546
1547 // Create reference
1548 SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
1549 SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
1550
1551 // Fill reference
1552 fill(lhs, 0);
1553 fill(rhs, 1);
1554
1555 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1556 }
1557
1558 TensorType _target{};
1559 SimpleTensor<int32_t> _reference{};
1560};
1561
1562template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
1563class GEMMLowpMatrixMultiplyNative3DValidationFixture : public framework::Fixture
1564{
1565public:
1566 template <typename...>
1567 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
1568 {
1569 GEMMLHSMatrixInfo lhs_info;
1570 lhs_info.m0 = m0;
1571 lhs_info.k0 = k0;
1572
1573 GEMMRHSMatrixInfo rhs_info;
1574 rhs_info.n0 = n0;
1575 rhs_info.k0 = k0;
1576
1577 // In case of GEMM3D, m is the product between m_w and m_h
1578 const unsigned int m = m_w * m_h;
1579
1580 // Set the tensor shapes for LHS and RHS matrices
1581 const TensorShape lhs_shape(k, m, batch_size);
1582 const TensorShape rhs_shape(n, k, batch_size);
1583
1584 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
1585 _reference = compute_reference(lhs_shape, rhs_shape, m_h);
1586 }
1587
1588protected:
1589 template <typename U>
1590 void fill(U &&tensor, int i)
1591 {
1592 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1593 std::uniform_int_distribution<> distribution(1, 254);
1594 library->fill(tensor, distribution, i);
1595 }
1596
1597 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
1598 {
1599 // Create tensors
1600 TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
1601 TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
1602 TensorType dst;
1603
1604 const unsigned int M = lhs_shape[1];
1605 const unsigned int N = rhs_shape[0];
1606 const unsigned int K = lhs_shape[0];
1607
1608 // The output tensor will be auto-initialized within the function
1609
1610 // Create and configure function
1611 GEMMFunctionType gemm;
1612 gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
1613
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001614 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1615 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001616
Giorgio Arena63825e82021-03-25 14:54:50 +00001617 add_padding_x({ &lhs, &rhs, &dst });
1618
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001619 // Allocate tensors
1620 lhs.allocator()->allocate();
1621 rhs.allocator()->allocate();
1622 dst.allocator()->allocate();
1623
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001624 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1625 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1626 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001627
1628 // Fill tensors
1629 fill(AccessorType(lhs), 0);
1630 fill(AccessorType(rhs), 1);
1631
1632 // Compute GEMM
1633 gemm.run();
1634
1635 return dst;
1636 }
1637
1638 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
1639 {
1640 TensorShape dst_shape = lhs_shape;
1641 dst_shape.set(0, rhs_shape[0]);
1642 dst_shape.set(1, lhs_shape[1] / m_h);
1643 dst_shape.set(2, m_h);
1644 dst_shape.set(3, lhs_shape[2]);
1645
1646 // Create reference
1647 SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
1648 SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
1649
1650 // Fill reference
1651 fill(lhs, 0);
1652 fill(rhs, 1);
1653
1654 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1655 }
1656
1657 TensorType _target{};
1658 SimpleTensor<int32_t> _reference{};
1659};
Pablo Tello299025a2017-09-29 11:30:12 +01001660} // namespace validation
1661} // namespace test
1662} // namespace arm_compute
George Wort2d7e6832019-02-22 16:37:41 +00001663#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */