/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE

#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/GEMMLowp.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
template <typename U>
void fill(U &&tensor, int i)
{
    switch(tensor.data_type())
    {
        case DataType::QSYMM8_PER_CHANNEL:
        {
            // Widen [min_bound, max_bound] so a single distribution covers the symmetric bounds of every channel
            int min_bound = 128;
            int max_bound = -127;
            for(size_t j = 0; j < tensor.quantization_info().scale().size(); j++)
            {
                std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
                if(bounds.first < min_bound)
                {
                    min_bound = bounds.first;
                }
                if(bounds.second > max_bound)
                {
                    max_bound = bounds.second;
                }
            }
            std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
            library->fill(tensor, distribution, i);
            break;
        }
        case DataType::QASYMM8:
        {
            std::uniform_int_distribution<uint8_t> distribution(1, 254);
            library->fill(tensor, distribution, i);
            break;
        }
        case DataType::F16:
        {
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
            library->fill(tensor, distribution, i);
            break;
        }
        case DataType::F32:
        {
            std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
            library->fill(tensor, distribution, i);
            break;
        }
        default:
            library->fill_tensor_uniform(tensor, i);
    }
}

template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false>
TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                   GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
                                   QuantizationInfo b_qinfo = QuantizationInfo(), bool reshape_b_only_on_first_run = false)
{
    // Create tensors
    DataType data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;

    TensorType a      = create_tensor<TensorType>(shape_a, data_type_a, 1);
    TensorType b      = create_tensor<TensorType>(shape_b, data_type_b, 1); // Note: the GEMM output before the output stage mismatches if data_layout_output is passed here; to be investigated
    TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));

    if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
    {
        b.info()->set_quantization_info(b_qinfo);
    }
    else
    {
        b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
    }
    TensorType bias;
    if(is_fused)
    {
        TensorShape bias_shape(shape_b[0]);
        bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
    }

    // Create and configure function
    // The GEMMInfo includes the depth values in case of reinterpreted 3D input/output
    FunctionType gemmlowp;
    gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, reshape_b_only_on_first_run, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false,
                                                                             output_stage));

    ARM_COMPUTE_ASSERT(a.info()->is_resizable());
    ARM_COMPUTE_ASSERT(b.info()->is_resizable());
    ARM_COMPUTE_ASSERT(output.info()->is_resizable());

    add_padding_x({ &a, &b, &output });

    // Allocate tensors
    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();

    ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
    ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
    ARM_COMPUTE_ASSERT(!output.info()->is_resizable());

    // Fill tensors
    fill(AccessorType(a), 0);
    fill(AccessorType(b), 1);

    if(is_fused)
    {
        ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
        bias.allocator()->allocate();
        ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
        fill(AccessorType(bias), 2);
    }
    // Compute GEMM function
    gemmlowp.run();
    return output;
}

template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t>
SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                                 DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, QuantizationInfo b_qinfo = QuantizationInfo())
{
    TensorShape shape_a_to_use = shape_a;
    if(reinterpret_input_as_3d)
    {
        // Collapse the second and third dimension if the input is 3D
        shape_a_to_use.collapse(2U, 1U);
    }

    // Create reference
    SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1 };
    SimpleTensor<TW> b{ shape_b, data_type_b, 1, data_type_b == DataType::QSYMM8_PER_CHANNEL ? b_qinfo : QuantizationInfo(1.0f / 255, b_offset) };

    // Fill reference
    fill(a, 0);
    fill(b, 1);
    return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>(a, b, shape_output, a_offset, b_offset);
}
}

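// The fixtures below validate a target GEMMLowp function against the reference
// gemmlowp_matrix_multiply_core computation above. As a rough sketch (an assumption based
// on the public NEGEMMLowpMatrixMultiplyCore description, not on anything in this file),
// each int32 accumulator is
//
//   dst[y][x] = sum_k (int32(a[y][k]) + a_offset) * (int32(b[k][x]) + b_offset)
//
// For example, with a_offset = 1, b_offset = 2, an A row { 3, 4 } and a B column { 5, 6 }:
//   (3 + 1) * (5 + 2) + (4 + 1) * (6 + 2) = 28 + 40 = 68.
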
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
    {
        _target    = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset);
        _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

protected:
    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
    {
        return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

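// A minimal usage sketch (assumed test-side names, not defined in this file): a test suite
// would typically alias the fixture for a concrete backend and compare target against
// reference, e.g.
//
//   using NEGEMMLowpMatrixMultiplyCoreFixture =
//       GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL,
//                          datasets::SmallGEMMLowpDataset())
//   {
//       validate(Accessor(_target), _reference);
//   }
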
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t>
class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputGenericValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, DataType data_type_b,
               bool reshape_b_only_on_first_run)
    {
        ARM_COMPUTE_ASSERT(output_stage.type != GEMMLowpOutputStageType::NONE);
        DataType data_type_a = data_type_b == DataType::QASYMM8_SIGNED ? DataType::QASYMM8_SIGNED : DataType::QASYMM8;

        if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
        {
            output_stage.is_quantized_per_channel = true;
            const size_t num_channels             = shape_b[0];
            std::vector<float> scales(num_channels);
            std::uniform_real_distribution<float> distribution(0.f, 1.f);
            library->fill(scales, distribution, 0);
            output_stage.gemmlowp_multipliers.resize(num_channels);
            output_stage.gemmlowp_shifts.resize(num_channels);
            for(size_t i = 0; i < num_channels; ++i)
            {
                quantization::calculate_quantized_multiplier(scales[i], &output_stage.gemmlowp_multipliers[i], &output_stage.gemmlowp_shifts[i]);
            }

            _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
            _target    = compute_target(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales), reshape_b_only_on_first_run);
        }
        else
        {
            _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
            _target    = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo(), reshape_b_only_on_first_run);
        }
    }

protected:
    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage,
                              DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo, bool reshape_b_only_on_first_run = false)
    {
        return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset,
                                                                                                                                                   output_stage, data_type_a, data_type_b, b_qinfo, reshape_b_only_on_first_run);
    }

    SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                       GEMMLowpOutputStageInfo output_stage, DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
    {
        SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW>(shape_a, shape_b, shape_output, a_offset, b_offset, data_type_a, data_type_b, b_qinfo);

        TensorShape bias_shape(shape_b[0]);
        SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
        fill(bias, 2);

        switch(output_stage.type)
        {
            case GEMMLowpOutputStageType::QUANTIZE_DOWN:
                return reference::gemmlowp_quantize_down_scale<int32_t, TW>(output, bias,
                                                                            output_stage.gemmlowp_offset, output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
                break;
            case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
                return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TW>(output, bias,
                                                                                          output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
                break;
            default:
                ARM_COMPUTE_ERROR("Not Supported!");
        }
    }

    TensorType       _target{};
    SimpleTensor<TI> _reference{};
};

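// Note on the QSYMM8_PER_CHANNEL path above: each random per-channel float scale is turned
// into an integer multiplier/shift pair with quantization::calculate_quantized_multiplier()
// so the fused output stage can requantize the int32 accumulators with integer arithmetic
// only. As a loose sketch (an assumption here; see AsymmHelpers.h for the exact contract):
//
//   scale ~= gemmlowp_multiplier * 2^(-31 - gemmlowp_shift)
//
// with gemmlowp_shift interpreted as a right shift when positive.
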
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t>
class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public
    GEMMLowpMatrixMultiplyCoreFusedOffsetOutputGenericValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW>
{
public:
    template <typename...>
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, DataType data_type_b)
    {
        GEMMLowpMatrixMultiplyCoreFusedOffsetOutputGenericValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW>::setup(shape_a, shape_b,
                shape_output, a_offset, b_offset, output_stage, data_type_b, false);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType output_stage;
        GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
        output_stage_info.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
        output_stage_info.gemmlowp_offset         = result_offset;
        output_stage_info.gemmlowp_multiplier     = result_mult_int;
        output_stage_info.gemmlowp_shift          = result_shift;
        output_stage_info.gemmlowp_min_bound      = min;
        output_stage_info.gemmlowp_max_bound      = max;
        output_stage_info.output_data_type        = DataType::QASYMM8;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);

        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(c.info()->is_resizable());

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!c.info()->is_resizable());

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(b.info()->is_resizable());

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_ASSERT(!b.info()->is_resizable());

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
        const std::vector<int32_t> result_shift_vec    = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};

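// Sketch of the integer down-scale (QUANTIZE_DOWN) stage validated above; this is an
// assumption based on the public GEMMLowpQuantizeDownInt32ToUint8Scale description, not
// something defined in this file:
//
//   tmp = (acc + bias + result_offset) * result_mult_int;
//   dst = uint8_t(clamp(tmp >> result_shift, min, max));
//
// e.g. acc = 1000, bias = 0, result_offset = 24, result_mult_int = 3, result_shift = 5
// gives (1000 + 24) * 3 = 3072 and 3072 >> 5 = 96.
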
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);

        // Create and configure function
        FunctionType output_stage;
        GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
        output_stage_info.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
        output_stage_info.gemmlowp_offset         = result_offset;
        output_stage_info.gemmlowp_multiplier     = result_mult_int;
        output_stage_info.gemmlowp_shift          = result_shift;
        output_stage_info.gemmlowp_min_bound      = min;
        output_stage_info.gemmlowp_max_bound      = max;
        output_stage_info.output_data_type        = DataType::QASYMM8_SIGNED;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);

        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(c.info()->is_resizable());

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!c.info()->is_resizable());

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(b.info()->is_resizable());

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_ASSERT(!b.info()->is_resizable());

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
        const std::vector<int32_t> result_shift_vec    = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
        }
    }

    TensorType           _target{};
    SimpleTensor<int8_t> _reference{};
};

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(c.info()->is_resizable());

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!c.info()->is_resizable());

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(b.info()->is_resizable());

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_ASSERT(!b.info()->is_resizable());

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                           bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
    }

    TensorType           _target{};
    SimpleTensor<int8_t> _reference{};
};

template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(c.info()->is_resizable());

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!c.info()->is_resizable());

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(b.info()->is_resizable());

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_ASSERT(!b.info()->is_resizable());

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<uint8_t> _reference{};
};

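// Sketch of the fixed-point requantization exercised by the *ScaleByFixedPoint fixtures in
// this file; an assumption based on the public QuantizeDownInt32To{Uint8,Int8,Int16}
// ScaleByFixedPoint descriptions, and the helper names below are illustrative, not real API:
//
//   tmp = saturating_rounding_doubling_high_mul(acc + bias, result_fixedpoint_multiplier);
//   tmp = rounding_arithmetic_shift_right(tmp, result_shift);
//   dst = clamp(tmp + result_offset_after_shift, min, max);
//
// i.e. the floating-point rescale is replaced by a Q0.31 multiplier plus a rounding right shift.
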
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(DataType data_type, TensorShape shape, float result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(data_type, shape, result_real_multiplier, result_offset, min, max, add_bias);
        _reference = compute_reference(shape, result_real_multiplier, result_offset, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        // To avoid all of the data being clamped
        std::uniform_int_distribution<> distribution(-500, 500);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(DataType data_type, const TensorShape &shape, float result_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, data_type, 1);

        // Create output stage info
        GEMMLowpOutputStageInfo info;
        info.gemmlowp_max_bound       = max;
        info.gemmlowp_min_bound       = min;
        info.gemmlowp_real_multiplier = result_multiplier;
        info.gemmlowp_offset          = result_offset;
        info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT;
        info.output_data_type         = data_type;

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, info);

        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(c.info()->is_resizable());

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!c.info()->is_resizable());

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(b.info()->is_resizable());

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_ASSERT(!b.info()->is_resizable());

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<T> compute_reference(const TensorShape &shape, float_t result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<float_t> result_float_multiplier_vec = { result_real_multiplier };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, b, result_float_multiplier_vec, result_offset, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, result_float_multiplier_vec, result_offset, min, max);
        }
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

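// Sketch of the float down-scale (QUANTIZE_DOWN_FLOAT) stage validated above; an assumption
// based on the public output-stage description, not something defined in this file:
//
//   dst = clamp(round(float(acc + bias) * result_real_multiplier) + result_offset, min, max);
//
// e.g. acc = 730, bias = 0, result_real_multiplier = 0.1f, result_offset = 10 gives
// round(73.0) + 10 = 83.
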
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
    {
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QSYMM16, 1);

        // Create and configure function
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);

        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(c.info()->is_resizable());

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!c.info()->is_resizable());

        // Fill tensor
        fill(AccessorType(a), 0);

        if(add_bias)
        {
            ARM_COMPUTE_ASSERT(b.info()->is_resizable());

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_ASSERT(!b.info()->is_resizable());

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<int16_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference
        fill(a, 0);

        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
        }
    }

    TensorType            _target{};
    SimpleTensor<int16_t> _reference{};
};

template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
               bool interleave_rhs, DataType data_type)
    {
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = m0;
        lhs_info.k0         = k0;
        lhs_info.v0         = v0;
        lhs_info.interleave = interleave_lhs;
        lhs_info.transpose  = false;

        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = n0;
        rhs_info.k0         = k0;
        rhs_info.h0         = h0;
        rhs_info.interleave = interleave_rhs;
        rhs_info.transpose  = true;

        // Set the tensor shapes for LHS and RHS matrices
        const TensorShape lhs_shape(k, m, batch_size);
        const TensorShape rhs_shape(n, k, batch_size);

        _target    = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
        _reference = compute_reference(lhs_shape, rhs_shape, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
                std::uniform_int_distribution<> distribution(1, 254);
                library->fill(tensor, distribution, i);
            }
            break;
            case DataType::QASYMM8_SIGNED:
            {
                std::uniform_int_distribution<> distribution(-127, 126);
                library->fill(tensor, distribution, i);
            }
            break;
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
    {
        // Create tensors
        TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
        TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
        TensorType lhs_reshaped;
        TensorType rhs_reshaped;
        TensorType dst;

        const unsigned int M = lhs_shape[1];
        const unsigned int N = rhs_shape[0];
        const unsigned int K = lhs_shape[0];

        // The output tensor will be auto-initialized within the function

        // Create and configure function
        ReshapeLHSOperatorType reshape_lhs;
        ReshapeRHSOperatorType reshape_rhs;
        GEMMFunctionType       gemm;
        reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
        reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
        gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));

        ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());

        add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(lhs), 0);
        fill(AccessorType(rhs), 1);

        // Compute GEMM
        ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
        reshape_lhs.run(reshape_lhs_pack);
        ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
        reshape_rhs.run(reshape_rhs_pack);
        ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
        gemm.run(gemm_pack);

        return dst;
    }

    SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
    {
        TensorShape dst_shape = lhs_shape;
        dst_shape[0]          = rhs_shape[0];
        dst_shape[1]          = lhs_shape[1];

        switch(data_type)
        {
            case DataType::QASYMM8:
            {
                // Create reference
                SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            case DataType::QASYMM8_SIGNED:
            {
                // Create reference
                SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
                SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };

                // Fill reference
                fill(lhs, 0);
                fill(rhs, 1);

                return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported data type");
        }
    }

    TensorType            _target{};
    SimpleTensor<int32_t> _reference{};
};

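// The reshaped fixtures above and below exercise the operator-level API directly: the
// reshape and GEMM operators are configured with ITensorInfo pointers and executed through
// ITensorPack objects mapping ACL_SRC/ACL_DST (and ACL_SRC_0/ACL_SRC_1 for the GEMM) to the
// actual tensors, instead of going through a runtime function. The reference path runs the
// plain, non-reshaped low-precision GEMM with zero offsets.
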
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001038template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001039class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
1040{
1041public:
1042 template <typename...>
1043 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
Sheri Zhang28287af2020-02-25 14:13:54 +00001044 bool interleave_lhs, bool interleave_rhs, DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001045 {
1046 GEMMLHSMatrixInfo lhs_info;
1047 lhs_info.m0 = m0;
1048 lhs_info.k0 = k0;
1049 lhs_info.v0 = v0;
1050 lhs_info.interleave = interleave_lhs;
1051 lhs_info.transpose = false;
1052
1053 GEMMRHSMatrixInfo rhs_info;
1054 rhs_info.n0 = n0;
1055 rhs_info.k0 = k0;
1056 rhs_info.h0 = h0;
1057 rhs_info.interleave = interleave_rhs;
1058 rhs_info.transpose = true;
1059
1060 // In case of GEMM3D, m is the product between m_w and m_h
1061 const unsigned int m = m_w * m_h;
1062
1063 // Set the tensor shapes for LHS and RHS matrices
1064 const TensorShape lhs_shape(k, m, batch_size);
1065 const TensorShape rhs_shape(n, k, batch_size);
1066
Sheri Zhang28287af2020-02-25 14:13:54 +00001067 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
1068 _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001069 }
1070
1071protected:
1072 template <typename U>
1073 void fill(U &&tensor, int i)
1074 {
Sheri Zhang28287af2020-02-25 14:13:54 +00001075 switch(tensor.data_type())
1076 {
1077 case DataType::QASYMM8:
1078 {
1079 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1080 std::uniform_int_distribution<> distribution(1, 254);
1081 library->fill(tensor, distribution, i);
1082 }
1083 break;
1084 case DataType::QASYMM8_SIGNED:
1085 {
1086 std::uniform_int_distribution<> distribution(-127, 126);
1087 library->fill(tensor, distribution, i);
1088 }
1089 break;
1090 default:
1091 ARM_COMPUTE_ERROR("Unsupported data type");
1092 }
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001093 }
1094
Sheri Zhang28287af2020-02-25 14:13:54 +00001095 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h,
1096 DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001097 {
1098 // Create tensors
Sheri Zhang28287af2020-02-25 14:13:54 +00001099 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
1100 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001101 TensorType lhs_reshaped;
1102 TensorType rhs_reshaped;
1103 TensorType dst;
1104
1105 const unsigned int M = lhs_shape[1];
1106 const unsigned int N = rhs_shape[0];
1107 const unsigned int K = lhs_shape[0];
1108
1109 // The output tensor will be auto-initialized within the function
1110
1111 // Create and configure function
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001112 ReshapeLHSOperatorType reshape_lhs;
1113 ReshapeRHSOperatorType reshape_rhs;
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001114 GEMMFunctionType gemm;
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001115 reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
1116 reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001117 gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001118
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001119 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1120 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001121
Giorgio Arena63825e82021-03-25 14:54:50 +00001122 add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });
1123
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001124 // Allocate tensors
1125 lhs.allocator()->allocate();
1126 rhs.allocator()->allocate();
1127 lhs_reshaped.allocator()->allocate();
1128 rhs_reshaped.allocator()->allocate();
1129 dst.allocator()->allocate();
1130
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001131 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1132 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1133 ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable());
1134 ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
1135 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001136
1137 // Fill tensors
1138 fill(AccessorType(lhs), 0);
1139 fill(AccessorType(rhs), 1);
1140
1141 // Compute GEMM
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001142 ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
1143 reshape_lhs.run(reshape_lhs_pack);
1144 ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
1145 reshape_rhs.run(reshape_rhs_pack);
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001146 ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
1147 gemm.run(gemm_pack);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001148
1149 return dst;
1150 }
1151
Sheri Zhang28287af2020-02-25 14:13:54 +00001152 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001153 {
1154 TensorShape dst_shape = lhs_shape;
1155 dst_shape.set(0, rhs_shape[0]);
1156 dst_shape.set(1, lhs_shape[1] / m_h);
1157 dst_shape.set(2, m_h);
1158 dst_shape.set(3, lhs_shape[2]);
1159
Sheri Zhang28287af2020-02-25 14:13:54 +00001160 switch(data_type)
1161 {
1162 case DataType::QASYMM8:
1163 {
1164 // Create reference
1165 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
1166 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001167
Sheri Zhang28287af2020-02-25 14:13:54 +00001168 // Fill reference
1169 fill(lhs, 0);
1170 fill(rhs, 1);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001171
Sheri Zhang28287af2020-02-25 14:13:54 +00001172 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1173 }
1174 case DataType::QASYMM8_SIGNED:
1175 {
1176 // Create reference
1177 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1178 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1179
1180 // Fill reference
1181 fill(lhs, 0);
1182 fill(rhs, 1);
1183
1184 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1185 }
1186 default:
1187 ARM_COMPUTE_ERROR("Unsupported data type");
1188 }
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001189 }
1190
1191 TensorType _target{};
1192 SimpleTensor<int32_t> _reference{};
1193};
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001194
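/** Fixture for the GEMMLowp matrix multiplication where only the RHS matrix is reshaped.
 *
 * The RHS block layout is controlled by n0, k0, h0 and the interleave/transpose flags, while the LHS is
 * consumed in its original layout. The target output is validated against
 * reference::gemmlowp_matrix_multiply_core for both QASYMM8 and QASYMM8_SIGNED inputs.
 *
 * Minimal usage sketch (the concrete operator types and dataset are illustrative placeholders; the real
 * test suites plug in the backend-specific reshape-RHS and GEMM operators):
 *
 *   using Fixture = GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture<Tensor, Accessor,
 *                                                                          SomeReshapeRhsOperator,
 *                                                                          SomeGemmLowpReshapedOnlyRhsOperator>;
 *   FIXTURE_DATA_TEST_CASE(RunSmall, Fixture, framework::DatasetMode::ALL, ...dataset...);
 */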
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001195template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001196class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
1197{
1198public:
1199 template <typename...>
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001200 void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
1201 unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001202 {
1203 GEMMLHSMatrixInfo lhs_info;
1204 lhs_info.m0 = m0;
1205 lhs_info.k0 = k0;
1206
1207 GEMMRHSMatrixInfo rhs_info;
1208 rhs_info.n0 = n0;
1209 rhs_info.k0 = k0;
1210 rhs_info.h0 = h0;
1211 rhs_info.interleave = interleave_rhs;
1212 rhs_info.transpose = transpose_rhs;
1213
1214 // Set the tensor shapes for LHS and RHS matrices
1215 const TensorShape lhs_shape(k, m, batch_size);
1216 const TensorShape rhs_shape(n, k, batch_size);
1217
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001218 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
1219 _reference = compute_reference(lhs_shape, rhs_shape, data_type);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001220 }
1221
1222protected:
1223 template <typename U>
1224 void fill(U &&tensor, int i)
1225 {
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001226 switch(tensor.data_type())
1227 {
1228 case DataType::QASYMM8:
1229 {
1230 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1231 std::uniform_int_distribution<> distribution(1, 254);
1232 library->fill(tensor, distribution, i);
1233 }
1234 break;
1235 case DataType::QASYMM8_SIGNED:
1236 {
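                // Between -127 and 126 so that the int8 extremes -128 and 127 never appear, mirroring the unsigned case above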
1237 std::uniform_int_distribution<> distribution(-127, 126);
1238 library->fill(tensor, distribution, i);
1239 }
1240 break;
1241 default:
1242 ARM_COMPUTE_ERROR("Unsupported data type");
1243 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001244 }
1245
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001246 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
1247 const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001248 {
1249 // Create tensors
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001250 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
1251 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001252 TensorType rhs_reshaped;
1253 TensorType dst;
1254
1255 const unsigned int M = lhs_shape[1];
1256 const unsigned int N = rhs_shape[0];
1257 const unsigned int K = lhs_shape[0];
1258
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001259 GEMMKernelInfo gemm_info;
1260 gemm_info.m = M;
1261 gemm_info.n = N;
1262 gemm_info.k = K;
1263 gemm_info.lhs_info = lhs_info;
1264 gemm_info.rhs_info = rhs_info;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001265 // The output tensor will be auto-initialized within the function
1266
1267 // Create and configure function
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001268 ReshapeRHSOperatorType reshape_rhs;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001269 GEMMFunctionType gemm;
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001270 reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001271 gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001272
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001273 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1274 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001275
Giorgio Arena63825e82021-03-25 14:54:50 +00001276 add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });
1277
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001278 // Allocate tensors
1279 lhs.allocator()->allocate();
1280 rhs.allocator()->allocate();
1281 rhs_reshaped.allocator()->allocate();
1282 dst.allocator()->allocate();
1283
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001284 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1285 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1286 ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
1287 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001288
1289 // Fill tensors
1290 fill(AccessorType(lhs), 0);
1291 fill(AccessorType(rhs), 1);
1292
1293 // Compute GEMM
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001294 ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
1295 reshape_rhs.run(reshape_rhs_pack);
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001296 ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
1297 gemm.run(gemm_pack);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001298
1299 return dst;
1300 }
1301
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001302 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001303 {
1304 TensorShape dst_shape = lhs_shape;
1305 dst_shape[0] = rhs_shape[0];
1306 dst_shape[1] = lhs_shape[1];
1307
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001308 if(data_type == DataType::QASYMM8)
1309 {
1310 // Create reference
1311 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
1312 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001313
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001314 // Fill reference
1315 fill(lhs, 0);
1316 fill(rhs, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001317
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001318 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1319 }
1320 else
1321 {
1322 // Create reference
1323 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1324 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1325
1326 // Fill reference
1327 fill(lhs, 0);
1328 fill(rhs, 1);
1329
1330 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1331 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001332 }
1333
1334 TensorType _target{};
1335 SimpleTensor<int32_t> _reference{};
1336};
1337
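/** Fixture for the GEMMLowp matrix multiplication with reshaped RHS and a 3D (GEMM3D) output.
 *
 * M is the product of m_w and m_h, and gemm_info.depth_output_gemm3d = m_h asks the kernel to write the
 * result as a (N, m_w, m_h) tensor per batch; the reference reproduces this by splitting the M dimension
 * of the flat output.
 */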
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001338template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001339class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
1340{
1341public:
1342 template <typename...>
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001343 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
1344 unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001345 {
1346 GEMMLHSMatrixInfo lhs_info;
1347 lhs_info.m0 = m0;
1348 lhs_info.k0 = k0;
1349
1350 GEMMRHSMatrixInfo rhs_info;
1351 rhs_info.n0 = n0;
1352 rhs_info.k0 = k0;
1353 rhs_info.h0 = h0;
1354 rhs_info.interleave = interleave_rhs;
1355 rhs_info.transpose = transpose_rhs;
1356
1357        // In case of GEMM3D, m is the product of m_w and m_h
1358 const unsigned int m = m_w * m_h;
1359
1360 // Set the tensor shapes for LHS and RHS matrices
1361 const TensorShape lhs_shape(k, m, batch_size);
1362 const TensorShape rhs_shape(n, k, batch_size);
1363
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001364 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
1365 _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001366 }
1367
1368protected:
1369 template <typename U>
1370 void fill(U &&tensor, int i)
1371 {
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001372 switch(tensor.data_type())
1373 {
1374 case DataType::QASYMM8:
1375 {
1376                // Between 1 and 254 in order to avoid having -128 and 127 (the int8 extremes) in the DOT product path
1377 std::uniform_int_distribution<> distribution(1, 254);
1378 library->fill(tensor, distribution, i);
1379 }
1380 break;
1381 case DataType::QASYMM8_SIGNED:
1382 {
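                // Restricted to [-127, 126] to keep the int8 extremes out of the DOT product path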
1383 std::uniform_int_distribution<> distribution(-127, 126);
1384 library->fill(tensor, distribution, i);
1385 }
1386 break;
1387 default:
1388 ARM_COMPUTE_ERROR("Unsupported data type");
1389 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001390 }
1391
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001392 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
1393 const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001394 {
1395 // Create tensors
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001396 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
1397 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001398 TensorType rhs_reshaped;
1399 TensorType dst;
1400
1401 const unsigned int M = lhs_shape[1];
1402 const unsigned int N = rhs_shape[0];
1403 const unsigned int K = lhs_shape[0];
1404
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001405 GEMMKernelInfo gemm_info;
1406 gemm_info.m = M;
1407 gemm_info.n = N;
1408 gemm_info.k = K;
1409 gemm_info.depth_output_gemm3d = m_h;
1410 gemm_info.lhs_info = lhs_info;
1411 gemm_info.rhs_info = rhs_info;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001412 // The output tensor will be auto-initialized within the function
1413
1414 // Create and configure function
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001415 ReshapeRHSOperatorType reshape_rhs;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001416 GEMMFunctionType gemm;
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001417 reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001418 gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001419
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001420 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1421 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001422
Giorgio Arena63825e82021-03-25 14:54:50 +00001423 add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });
1424
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001425 // Allocate tensors
1426 lhs.allocator()->allocate();
1427 rhs.allocator()->allocate();
1428 rhs_reshaped.allocator()->allocate();
1429 dst.allocator()->allocate();
1430
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001431 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1432 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1433 ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable());
1434 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001435
1436 // Fill tensors
1437 fill(AccessorType(lhs), 0);
1438 fill(AccessorType(rhs), 1);
1439
1440 // Compute GEMM
Georgios Pinitas856f66e2021-04-22 21:13:21 +01001441 ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
1442 reshape_rhs.run(reshape_rhs_pack);
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001443 ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } });
1444 gemm.run(gemm_pack);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001445
1446 return dst;
1447 }
1448
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001449 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001450 {
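        // Split the flat M dimension so the reference output matches the (N, m_w, m_h, batches) GEMM3D layout.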
1451 TensorShape dst_shape = lhs_shape;
1452 dst_shape.set(0, rhs_shape[0]);
1453 dst_shape.set(1, lhs_shape[1] / m_h);
1454 dst_shape.set(2, m_h);
1455 dst_shape.set(3, lhs_shape[2]);
1456
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001457 if(data_type == DataType::QASYMM8)
1458 {
1459 // Create reference
1460 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
1461 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001462
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001463 // Fill reference
1464 fill(lhs, 0);
1465 fill(rhs, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001466
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001467 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1468 }
1469 else
1470 {
1471 // Create reference
1472 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1473 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1474
1475 // Fill reference
1476 fill(lhs, 0);
1477 fill(rhs, 1);
1478
1479 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1480 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001481 }
1482
1483 TensorType _target{};
1484 SimpleTensor<int32_t> _reference{};
1485};
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001486
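/** Fixture for the native (non-reshaped) GEMMLowp matrix multiplication.
 *
 * Neither LHS nor RHS is reshaped; only the block sizes m0, n0 and k0 are configured. Inputs are QASYMM8
 * and the target is validated against reference::gemmlowp_matrix_multiply_core.
 */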
1487template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
1488class GEMMLowpMatrixMultiplyNativeValidationFixture : public framework::Fixture
1489{
1490public:
1491 template <typename...>
1492 void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
1493 {
1494 GEMMLHSMatrixInfo lhs_info;
1495 lhs_info.m0 = m0;
1496 lhs_info.k0 = k0;
1497
1498 GEMMRHSMatrixInfo rhs_info;
1499 rhs_info.n0 = n0;
1500 rhs_info.k0 = k0;
1501
1502 // Set the tensor shapes for LHS and RHS matrices
1503 const TensorShape lhs_shape(k, m, batch_size);
1504 const TensorShape rhs_shape(n, k, batch_size);
1505
1506 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
1507 _reference = compute_reference(lhs_shape, rhs_shape);
1508 }
1509
1510protected:
1511 template <typename U>
1512 void fill(U &&tensor, int i)
1513 {
1514        // Between 1 and 254 in order to avoid having -128 and 127 (the int8 extremes) in the DOT product path
1515 std::uniform_int_distribution<> distribution(1, 254);
1516 library->fill(tensor, distribution, i);
1517 }
1518
1519 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
1520 {
1521 // Create tensors
1522 TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
1523 TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
1524 TensorType dst;
1525
1526 const unsigned int M = lhs_shape[1];
1527 const unsigned int N = rhs_shape[0];
1528 const unsigned int K = lhs_shape[0];
1529
1530 // The output tensor will be auto-initialized within the function
1531
1532 // Create and configure function
1533 GEMMFunctionType gemm;
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001534 gemm.configure(lhs.info(), rhs.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001535
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001536 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1537 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001538
Giorgio Arena63825e82021-03-25 14:54:50 +00001539 add_padding_x({ &lhs, &rhs, &dst });
1540
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001541 // Allocate tensors
1542 lhs.allocator()->allocate();
1543 rhs.allocator()->allocate();
1544 dst.allocator()->allocate();
1545
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001546 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1547 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1548 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001549
1550 // Fill tensors
1551 fill(AccessorType(lhs), 0);
1552 fill(AccessorType(rhs), 1);
1553
1554 // Compute GEMM
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001555 ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_DST, &dst } });
1556 gemm.run(gemm_pack);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001557
1558 return dst;
1559 }
1560
1561 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
1562 {
1563 TensorShape dst_shape = lhs_shape;
1564 dst_shape[0] = rhs_shape[0];
1565 dst_shape[1] = lhs_shape[1];
1566
1567 // Create reference
1568 SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
1569 SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
1570
1571 // Fill reference
1572 fill(lhs, 0);
1573 fill(rhs, 1);
1574
1575 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1576 }
1577
1578 TensorType _target{};
1579 SimpleTensor<int32_t> _reference{};
1580};
1581
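/** Fixture for the native (non-reshaped) GEMMLowp matrix multiplication with a 3D (GEMM3D) output.
 *
 * As in the reshaped 3D fixtures, M = m_w * m_h and the trailing m_h passed to GEMMReshapeInfo makes the
 * kernel produce a (N, m_w, m_h) output per batch.
 */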
1582template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
1583class GEMMLowpMatrixMultiplyNative3DValidationFixture : public framework::Fixture
1584{
1585public:
1586 template <typename...>
1587 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
1588 {
1589 GEMMLHSMatrixInfo lhs_info;
1590 lhs_info.m0 = m0;
1591 lhs_info.k0 = k0;
1592
1593 GEMMRHSMatrixInfo rhs_info;
1594 rhs_info.n0 = n0;
1595 rhs_info.k0 = k0;
1596
1597        // In case of GEMM3D, m is the product of m_w and m_h
1598 const unsigned int m = m_w * m_h;
1599
1600 // Set the tensor shapes for LHS and RHS matrices
1601 const TensorShape lhs_shape(k, m, batch_size);
1602 const TensorShape rhs_shape(n, k, batch_size);
1603
1604 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
1605 _reference = compute_reference(lhs_shape, rhs_shape, m_h);
1606 }
1607
1608protected:
1609 template <typename U>
1610 void fill(U &&tensor, int i)
1611 {
1612        // Between 1 and 254 in order to avoid having -128 and 127 (the int8 extremes) in the DOT product path
1613 std::uniform_int_distribution<> distribution(1, 254);
1614 library->fill(tensor, distribution, i);
1615 }
1616
1617 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
1618 {
1619 // Create tensors
1620 TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
1621 TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
1622 TensorType dst;
1623
1624 const unsigned int M = lhs_shape[1];
1625 const unsigned int N = rhs_shape[0];
1626 const unsigned int K = lhs_shape[0];
1627
1628 // The output tensor will be auto-initialized within the function
1629
1630 // Create and configure function
1631 GEMMFunctionType gemm;
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001632 gemm.configure(lhs.info(), rhs.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001633
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001634 ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
1635 ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001636
Giorgio Arena63825e82021-03-25 14:54:50 +00001637 add_padding_x({ &lhs, &rhs, &dst });
1638
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001639 // Allocate tensors
1640 lhs.allocator()->allocate();
1641 rhs.allocator()->allocate();
1642 dst.allocator()->allocate();
1643
Michele Di Giorgio4fc10b32021-04-30 18:30:41 +01001644 ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable());
1645 ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable());
1646 ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001647
1648 // Fill tensors
1649 fill(AccessorType(lhs), 0);
1650 fill(AccessorType(rhs), 1);
1651
1652 // Compute GEMM
Georgios Pinitas4a578b92021-06-25 12:13:49 +01001653 ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_DST, &dst } });
1654 gemm.run(gemm_pack);
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001655
1656 return dst;
1657 }
1658
1659 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
1660 {
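        // Reshape the reference output to (N, m_w, m_h, batches) to match the GEMM3D layout.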
1661 TensorShape dst_shape = lhs_shape;
1662 dst_shape.set(0, rhs_shape[0]);
1663 dst_shape.set(1, lhs_shape[1] / m_h);
1664 dst_shape.set(2, m_h);
1665 dst_shape.set(3, lhs_shape[2]);
1666
1667 // Create reference
1668 SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
1669 SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
1670
1671 // Fill reference
1672 fill(lhs, 0);
1673 fill(rhs, 1);
1674
1675 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1676 }
1677
1678 TensorType _target{};
1679 SimpleTensor<int32_t> _reference{};
1680};
Pablo Tello299025a2017-09-29 11:30:12 +01001681} // namespace validation
1682} // namespace test
1683} // namespace arm_compute
George Wort2d7e6832019-02-22 16:37:41 +00001684#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */