blob: 95f49601a5d1b949b7207bb411719bc0fef98d24 [file] [log] [blame]
Pablo Tello299025a2017-09-29 11:30:12 +01001/*
Giorgio Arenab309fc22021-01-05 09:46:16 +00002 * Copyright (c) 2017-2021 Arm Limited.
Pablo Tello299025a2017-09-29 11:30:12 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
25#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
26
Michele Di Giorgiob54ba282020-01-14 15:31:55 +000027#include "arm_compute/core/KernelDescriptors.h"
Pablo Tello299025a2017-09-29 11:30:12 +010028#include "arm_compute/core/TensorShape.h"
29#include "arm_compute/core/Types.h"
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000030#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
Pablo Tello299025a2017-09-29 11:30:12 +010031#include "tests/AssetsLibrary.h"
32#include "tests/Globals.h"
33#include "tests/IAccessor.h"
34#include "tests/framework/Asserts.h"
35#include "tests/framework/Fixture.h"
Pablo Tello299025a2017-09-29 11:30:12 +010036#include "tests/validation/Helpers.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000037#include "tests/validation/reference/GEMMLowp.h"
Pablo Tello299025a2017-09-29 11:30:12 +010038
39#include <random>
40
41namespace arm_compute
42{
43namespace test
44{
45namespace validation
46{
George Wort2d7e6832019-02-22 16:37:41 +000047namespace
48{
49template <typename U>
50void fill(U &&tensor, int i)
51{
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000052 switch(tensor.data_type())
53 {
54 case DataType::QSYMM8_PER_CHANNEL:
55 {
56 int min_bound = 128;
57 int max_bound = -127;
58 for(size_t j = 0; j < tensor.quantization_info().scale().size(); j++)
59 {
60 std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
61 if(bounds.first < min_bound)
62 {
63 min_bound = bounds.first;
64 }
65 if(bounds.second > max_bound)
66 {
67 max_bound = bounds.second;
68 }
69 }
70 std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
71 library->fill(tensor, distribution, i);
72 break;
73 }
74 case DataType::QASYMM8:
75 {
76 std::uniform_int_distribution<uint8_t> distribution(1, 254);
77 library->fill(tensor, distribution, i);
78 break;
79 }
80 case DataType::F16:
Giorgio Arena6aeb2172020-12-15 15:45:43 +000081 {
Giorgio Arenaa8e2aeb2021-01-06 11:34:57 +000082 arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
Giorgio Arena6aeb2172020-12-15 15:45:43 +000083 library->fill(tensor, distribution, i);
84 break;
85 }
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000086 case DataType::F32:
87 {
Giorgio Arena6aeb2172020-12-15 15:45:43 +000088 std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +000089 library->fill(tensor, distribution, i);
90 break;
91 }
92 default:
93 library->fill_tensor_uniform(tensor, i);
94 }
George Wort2d7e6832019-02-22 16:37:41 +000095}
96
/** Run a GEMMLowp matrix-multiply-core computation on the target backend.
 *
 * Creates the LHS/RHS (and, when @p is_fused, a bias) tensor, configures the
 * function under test, allocates and fills everything, runs the function and
 * returns the output tensor.
 *
 * @param shape_a      Shape of the LHS matrix (collapsed form when reinterpreted as 3D)
 * @param shape_b      Shape of the RHS matrix
 * @param shape_output Shape of the output tensor
 * @param a_offset     Asymmetric quantization offset for the LHS
 * @param b_offset     Asymmetric quantization offset for the RHS (unused for per-channel weights)
 * @param output_stage Output-stage descriptor; NONE keeps raw S32 accumulators as the output
 * @param data_type_a  Data type of the LHS (also the output type when an output stage is fused)
 * @param data_type_b  Data type of the RHS
 * @param b_qinfo      Quantization info for the RHS when it is QSYMM8_PER_CHANNEL
 *
 * @return The output tensor produced by the target function.
 */
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false>
TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
                                   GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
                                   QuantizationInfo b_qinfo = QuantizationInfo())
{
    // Create tensors: without an output stage the result stays in S32 accumulators,
    // otherwise it is re-quantized to the input data type.
    DataType data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;

    TensorType a      = create_tensor<TensorType>(shape_a, data_type_a, 1);
    TensorType b      = create_tensor<TensorType>(shape_b, data_type_b, 1); // gemm output before output stage mismatch if i pass data_layout_output here. to be investigated
    TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));

    // Per-channel weights carry their own quantization info; otherwise derive it from b_offset
    if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
    {
        b.info()->set_quantization_info(b_qinfo);
    }
    else
    {
        b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
    }
    TensorType bias;
    if(is_fused)
    {
        // Bias is a 1D vector with one entry per output column
        TensorShape bias_shape(shape_b[0]);
        bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1);
    }

    // Create and configure function
    // The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output
    FunctionType gemmlowp;
    // TODO (COMPMID-1672) - Extending the test to validate add bias in offset contribution
    gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, output_stage));

    // Tensors must still be resizable before allocation...
    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Allocate tensors
    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();

    // ...and fixed afterwards
    ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Fill tensors (seeds 0/1/2 match the ones used by the reference path)
    fill(AccessorType(a), 0);
    fill(AccessorType(b), 1);

    if(is_fused)
    {
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        bias.allocator()->allocate();
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        fill(AccessorType(bias), 2);
    }
    // Compute GEMM function
    gemmlowp.run();
    return output;
}
160
Manuel Bottini959c26d2019-12-02 16:22:35 +0000161template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t>
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000162SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
Manuel Bottini959c26d2019-12-02 16:22:35 +0000163 DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, QuantizationInfo b_qinfo = QuantizationInfo())
George Wort2d7e6832019-02-22 16:37:41 +0000164{
165 TensorShape shape_a_to_use = shape_a;
166 if(reinterpret_input_as_3d)
167 {
168 // Collapse the second and third dimension if the input is 3D
169 shape_a_to_use.collapse(2U, 1U);
170 }
171
172 // Create reference
Manuel Bottini959c26d2019-12-02 16:22:35 +0000173 SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1 };
174 SimpleTensor<TW> b{ shape_b, data_type_b, 1, data_type_b == DataType::QSYMM8_PER_CHANNEL ? b_qinfo : QuantizationInfo(1.0f / 255, b_offset) };
George Wort2d7e6832019-02-22 16:37:41 +0000175
176 // Fill reference
177 fill(a, 0);
178 fill(b, 1);
Manuel Bottini959c26d2019-12-02 16:22:35 +0000179 return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>(a, b, shape_output, a_offset, b_offset);
George Wort2d7e6832019-02-22 16:37:41 +0000180}
181}
182
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100183template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
Gian Marcoe75a02b2017-11-08 12:24:09 +0000184class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
Pablo Tello299025a2017-09-29 11:30:12 +0100185{
186public:
187 template <typename...>
George Wort2d7e6832019-02-22 16:37:41 +0000188 void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset)
Pablo Tello299025a2017-09-29 11:30:12 +0100189 {
George Wort2d7e6832019-02-22 16:37:41 +0000190 _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset);
191 _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset);
Pablo Tello299025a2017-09-29 11:30:12 +0100192 }
193
194protected:
George Wort2d7e6832019-02-22 16:37:41 +0000195 TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
Pablo Tello299025a2017-09-29 11:30:12 +0100196 {
George Wort2d7e6832019-02-22 16:37:41 +0000197 return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset);
Pablo Tello299025a2017-09-29 11:30:12 +0100198 }
199
George Wort2d7e6832019-02-22 16:37:41 +0000200 SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset)
Pablo Tello299025a2017-09-29 11:30:12 +0100201 {
George Wort2d7e6832019-02-22 16:37:41 +0000202 return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset);
Pablo Tellobf2fb952017-09-29 16:43:25 +0100203 }
204
Pablo Tello6ff12a02017-11-02 16:09:35 +0000205 TensorType _target{};
206 SimpleTensor<int32_t> _reference{};
Pablo Tellobf2fb952017-09-29 16:43:25 +0100207};
208
Manuel Bottini959c26d2019-12-02 16:22:35 +0000209template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t>
George Wort2d7e6832019-02-22 16:37:41 +0000210class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture
211{
212public:
213 template <typename...>
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000214 void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, DataType data_type_b)
George Wort2d7e6832019-02-22 16:37:41 +0000215 {
216 ARM_COMPUTE_EXPECT(output_stage.type != GEMMLowpOutputStageType::NONE, framework::LogLevel::ERRORS);
Manuel Bottini959c26d2019-12-02 16:22:35 +0000217 DataType data_type_a = data_type_b == DataType::QASYMM8_SIGNED ? DataType::QASYMM8_SIGNED : DataType::QASYMM8;
218
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000219 if(data_type_b == DataType::QSYMM8_PER_CHANNEL)
220 {
Giorgio Arena4bdd1772020-12-17 16:47:07 +0000221 output_stage.is_quantized_per_channel = true;
222 const size_t num_channels = shape_b[0];
223 std::vector<float> scales(num_channels);
224 std::uniform_real_distribution<float> distribution(0.f, 1.f);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000225 library->fill(scales, distribution, 0);
226 output_stage.gemmlowp_multipliers.resize(num_channels);
227 output_stage.gemmlowp_shifts.resize(num_channels);
228 for(size_t i = 0; i < num_channels; ++i)
229 {
Michele Di Giorgiof29d1b72019-10-29 10:58:13 +0000230 quantization::calculate_quantized_multiplier(scales[i], &output_stage.gemmlowp_multipliers[i], &output_stage.gemmlowp_shifts[i]);
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000231 }
232
Manuel Bottini959c26d2019-12-02 16:22:35 +0000233 _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
234 _target = compute_target(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales));
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000235 }
236 else
237 {
Manuel Bottini959c26d2019-12-02 16:22:35 +0000238 _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
239 _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo());
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000240 }
George Wort2d7e6832019-02-22 16:37:41 +0000241 }
242
243protected:
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000244 TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage,
Manuel Bottini959c26d2019-12-02 16:22:35 +0000245 DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
George Wort2d7e6832019-02-22 16:37:41 +0000246 {
247 return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset,
Manuel Bottini959c26d2019-12-02 16:22:35 +0000248 output_stage, data_type_a, data_type_b, b_qinfo);
George Wort2d7e6832019-02-22 16:37:41 +0000249 }
250
Manuel Bottini959c26d2019-12-02 16:22:35 +0000251 SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset,
252 GEMMLowpOutputStageInfo output_stage, DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo)
George Wort2d7e6832019-02-22 16:37:41 +0000253 {
Manuel Bottini959c26d2019-12-02 16:22:35 +0000254 SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW>(shape_a, shape_b, shape_output, a_offset, b_offset, data_type_a, data_type_b, b_qinfo);
George Wort2d7e6832019-02-22 16:37:41 +0000255
256 TensorShape bias_shape(shape_b[0]);
257 SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 };
258 fill(bias, 2);
259
260 switch(output_stage.type)
261 {
262 case GEMMLowpOutputStageType::QUANTIZE_DOWN:
Manuel Bottini959c26d2019-12-02 16:22:35 +0000263 return reference::gemmlowp_quantize_down_scale<int32_t, TW>(output, bias,
264 output_stage.gemmlowp_offset, output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
George Wort2d7e6832019-02-22 16:37:41 +0000265 break;
266 case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
Manuel Bottini959c26d2019-12-02 16:22:35 +0000267 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TW>(output, bias,
268 output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound);
George Wort2d7e6832019-02-22 16:37:41 +0000269 break;
270 default:
271 ARM_COMPUTE_ERROR("Not Supported!");
272 }
273 }
274
Manuel Bottini959c26d2019-12-02 16:22:35 +0000275 TensorType _target{};
276 SimpleTensor<TI> _reference{};
George Wort2d7e6832019-02-22 16:37:41 +0000277};
278
Gian Marcoe75a02b2017-11-08 12:24:09 +0000279template <typename TensorType, typename AccessorType, typename FunctionType>
280class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture
281{
282public:
283 template <typename...>
Gian Marco6b77e912017-11-17 09:27:57 +0000284 void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000285 {
Gian Marco6b77e912017-11-17 09:27:57 +0000286 _target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
287 _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000288 }
289
290protected:
291 template <typename U>
292 void fill(U &&tensor, int i)
293 {
294 std::uniform_int_distribution<> distribution(-6000, 6000);
295 library->fill(tensor, distribution, i);
296 }
297
Gian Marco6b77e912017-11-17 09:27:57 +0000298 TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000299 {
Gian Marco6b77e912017-11-17 09:27:57 +0000300 TensorShape shape_bias(shape[0]);
301
Gian Marcoe75a02b2017-11-08 12:24:09 +0000302 // Create tensors
303 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
Gian Marco6b77e912017-11-17 09:27:57 +0000304 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
305 TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000306
307 // Create and configure function
Luca Foschiani4b869532020-02-13 15:07:36 +0000308 FunctionType output_stage;
309 GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
310 output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
311 output_stage_info.gemmlowp_offset = result_offset;
312 output_stage_info.gemmlowp_multiplier = result_mult_int;
313 output_stage_info.gemmlowp_shift = result_shift;
314 output_stage_info.gemmlowp_min_bound = min;
315 output_stage_info.gemmlowp_max_bound = max;
316 output_stage_info.output_data_type = DataType::QASYMM8;
317 output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000318
319 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marco6b77e912017-11-17 09:27:57 +0000320 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000321
322 // Allocate tensors
323 a.allocator()->allocate();
Gian Marco6b77e912017-11-17 09:27:57 +0000324 c.allocator()->allocate();
Gian Marcoe75a02b2017-11-08 12:24:09 +0000325
326 ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marco6b77e912017-11-17 09:27:57 +0000327 ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000328
Gian Marco6b77e912017-11-17 09:27:57 +0000329 // Fill tensor
Gian Marcoe75a02b2017-11-08 12:24:09 +0000330 fill(AccessorType(a), 0);
331
Gian Marco6b77e912017-11-17 09:27:57 +0000332 if(add_bias)
333 {
334 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
335
336 // Allocate bias tensor
337 b.allocator()->allocate();
338
339 ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
340
341 // Fill tensor
342 fill(AccessorType(b), 1);
343 }
344
Gian Marcoe75a02b2017-11-08 12:24:09 +0000345 // Compute GEMM function
346 output_stage.run();
Gian Marco6b77e912017-11-17 09:27:57 +0000347 return c;
Gian Marcoe75a02b2017-11-08 12:24:09 +0000348 }
349
Gian Marco6b77e912017-11-17 09:27:57 +0000350 SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000351 {
352 // Create reference
Gian Marco6b77e912017-11-17 09:27:57 +0000353 TensorShape shape_bias(shape[0]);
354
Gian Marcoe75a02b2017-11-08 12:24:09 +0000355 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
Gian Marco6b77e912017-11-17 09:27:57 +0000356 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
Gian Marcoe75a02b2017-11-08 12:24:09 +0000357
358 // Fill reference
359 fill(a, 0);
360
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000361 const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
362 const std::vector<int32_t> result_shift_vec = { result_shift };
363
Gian Marco6b77e912017-11-17 09:27:57 +0000364 if(add_bias)
365 {
366 // Fill bias
367 fill(b, 1);
368
Manuel Bottini959c26d2019-12-02 16:22:35 +0000369 return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
Gian Marco6b77e912017-11-17 09:27:57 +0000370 }
371 else
372 {
Manuel Bottini959c26d2019-12-02 16:22:35 +0000373 return reference::gemmlowp_quantize_down_scale<int32_t, uint8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
Gian Marco6b77e912017-11-17 09:27:57 +0000374 }
Gian Marcoe75a02b2017-11-08 12:24:09 +0000375 }
376
377 TensorType _target{};
378 SimpleTensor<uint8_t> _reference{};
379};
Gian Marco58c57942017-11-28 09:10:03 +0000380
381template <typename TensorType, typename AccessorType, typename FunctionType>
Luca Foschiani4b869532020-02-13 15:07:36 +0000382class GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture : public framework::Fixture
383{
384public:
385 template <typename...>
386 void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
387 {
388 _target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
389 _reference = compute_reference(shape, result_offset, result_mult_int, result_shift, min, max, add_bias);
390 }
391
392protected:
393 template <typename U>
394 void fill(U &&tensor, int i)
395 {
396 std::uniform_int_distribution<> distribution(-6000, 6000);
397 library->fill(tensor, distribution, i);
398 }
399
400 TensorType compute_target(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
401 {
402 TensorShape shape_bias(shape[0]);
403
404 // Create tensors
405 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
406 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
407 TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);
408
409 // Create and configure function
410 FunctionType output_stage;
411 GEMMLowpOutputStageInfo output_stage_info = GEMMLowpOutputStageInfo();
412 output_stage_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
413 output_stage_info.gemmlowp_offset = result_offset;
414 output_stage_info.gemmlowp_multiplier = result_mult_int;
415 output_stage_info.gemmlowp_shift = result_shift;
416 output_stage_info.gemmlowp_min_bound = min;
417 output_stage_info.gemmlowp_max_bound = max;
418 output_stage_info.output_data_type = DataType::QASYMM8_SIGNED;
419 output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info);
420
421 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
422 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
423
424 // Allocate tensors
425 a.allocator()->allocate();
426 c.allocator()->allocate();
427
428 ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
429 ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
430
431 // Fill tensor
432 fill(AccessorType(a), 0);
433
434 if(add_bias)
435 {
436 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
437
438 // Allocate bias tensor
439 b.allocator()->allocate();
440
441 ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
442
443 // Fill tensor
444 fill(AccessorType(b), 1);
445 }
446
447 // Compute GEMM function
448 output_stage.run();
449 return c;
450 }
451
452 SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
453 {
454 // Create reference
455 TensorShape shape_bias(shape[0]);
456
457 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
458 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
459
460 // Fill reference
461 fill(a, 0);
462
463 const std::vector<int32_t> result_mult_int_vec = { result_mult_int };
464 const std::vector<int32_t> result_shift_vec = { result_shift };
465
466 if(add_bias)
467 {
468 // Fill bias
469 fill(b, 1);
470
471 return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, b, result_offset, result_mult_int_vec, result_shift_vec, min, max);
472 }
473 else
474 {
475 return reference::gemmlowp_quantize_down_scale<int32_t, int8_t>(a, result_offset, result_mult_int_vec, result_shift_vec, min, max);
476 }
477 }
478
479 TensorType _target{};
480 SimpleTensor<int8_t> _reference{};
481};
482
/** Fixture validating the QUANTIZE_DOWN_FIXEDPOINT output stage:
 *  S32 accumulators -> QASYMM8_SIGNED via fixed-point multiplier + shift,
 *  with optional S32 bias addition.
 */
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    // Fill with S32 accumulator-like values; the range keeps most results inside [min, max]
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Bias is a 1D vector sized like the first output dimension
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8_SIGNED, 1);

        // Create and configure function (fixed-point parameters passed directly)
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        // The bias tensor is only allocated/filled when it participates in the computation
        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<int8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                           bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference (same seeds as the target path)
        fill(a, 0);

        // The reference takes per-channel vectors; wrap the single multiplier/shift
        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
    }

    TensorType           _target{};    // Output produced by the backend under test
    SimpleTensor<int8_t> _reference{}; // Output produced by the host reference
};
577
/** Fixture validating the QUANTIZE_DOWN_FIXEDPOINT output stage:
 *  S32 accumulators -> QASYMM8 via fixed-point multiplier + shift,
 *  with optional S32 bias addition.
 */
template <typename TensorType, typename AccessorType, typename FunctionType>
class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        _target    = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
        _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias);
    }

protected:
    // Fill with S32 accumulator-like values; the range keeps most results inside [min, max]
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_int_distribution<> distribution(-6000, 6000);
        library->fill(tensor, distribution, i);
    }

    TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias)
    {
        // Bias is a 1D vector sized like the first output dimension
        TensorShape shape_bias(shape[0]);

        // Create tensors
        TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
        TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
        TensorType c = create_tensor<TensorType>(shape, DataType::QASYMM8, 1);

        // Create and configure function (fixed-point parameters passed directly)
        FunctionType output_stage;
        output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        a.allocator()->allocate();
        c.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensor
        fill(AccessorType(a), 0);

        // The bias tensor is only allocated/filled when it participates in the computation
        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Allocate bias tensor
            b.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);

            // Fill tensor
            fill(AccessorType(b), 1);
        }

        // Compute GEMM function
        output_stage.run();
        return c;
    }

    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max,
                                            bool add_bias)
    {
        // Create reference
        TensorShape shape_bias(shape[0]);

        SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
        SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };

        // Fill reference (same seeds as the target path)
        fill(a, 0);

        // The reference takes per-channel vectors; wrap the single multiplier/shift
        const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
        const std::vector<int32_t> result_shift_vec                  = { result_shift };

        if(add_bias)
        {
            // Fill bias
            fill(b, 1);

            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
        else
        {
            return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, result_offset_after_shift, min, max);
        }
    }

    TensorType            _target{};    // Output produced by the backend under test
    SimpleTensor<uint8_t> _reference{}; // Output produced by the host reference
};
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000672
Sheri Zhang1b14c752020-03-09 14:29:52 +0000673template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
674class GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture : public framework::Fixture
675{
676public:
677 template <typename...>
678 void setup(DataType data_type, TensorShape shape, float result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
679 {
680 _target = compute_target(data_type, shape, result_real_multiplier, result_offset, min, max, add_bias);
681 _reference = compute_reference(shape, result_real_multiplier, result_offset, min, max, add_bias);
682 }
683
684protected:
685 template <typename U>
686 void fill(U &&tensor, int i)
687 {
688 // To avoid data all being clampped
689 std::uniform_int_distribution<> distribution(-500, 500);
690 library->fill(tensor, distribution, i);
691 }
692
693 TensorType compute_target(DataType data_type, const TensorShape &shape, float result_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
694 {
695 TensorShape shape_bias(shape[0]);
696
697 // Create tensors
698 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
699 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
700 TensorType c = create_tensor<TensorType>(shape, data_type, 1);
701
702 // create output stage info
703 GEMMLowpOutputStageInfo info;
704 info.gemmlowp_max_bound = max;
705 info.gemmlowp_min_bound = min;
706 info.gemmlowp_real_multiplier = result_multiplier;
707 info.gemmlowp_offset = result_offset;
708 info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT;
709 info.output_data_type = data_type;
710
711 // Create and configure function
712 FunctionType output_stage;
713 output_stage.configure(&a, add_bias ? &b : nullptr, &c, info);
714
715 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
716 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
717
718 // Allocate tensors
719 a.allocator()->allocate();
720 c.allocator()->allocate();
721
722 ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
723 ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
724
725 // Fill tensor
726 fill(AccessorType(a), 0);
727
728 if(add_bias)
729 {
730 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
731
732 // Allocate bias tensor
733 b.allocator()->allocate();
734
735 ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
736
737 // Fill tensor
738 fill(AccessorType(b), 1);
739 }
740
741 // Compute GEMM function
742 output_stage.run();
743 return c;
744 }
745
746 SimpleTensor<T> compute_reference(const TensorShape &shape, float_t result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias)
747 {
748 // Create reference
749 TensorShape shape_bias(shape[0]);
750
751 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
752 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
753
754 // Fill reference
755 fill(a, 0);
756
757 const std::vector<float_t> result_float_multiplier_vec = { result_real_multiplier };
758
759 if(add_bias)
760 {
761 // Fill bias
762 fill(b, 1);
763
764 return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, b, result_float_multiplier_vec, result_offset, min, max);
765 }
766 else
767 {
768 return reference::gemmlowp_quantize_down_scale_by_float<int32_t, T>(a, result_float_multiplier_vec, result_offset, min, max);
769 }
770 }
771
772 TensorType _target{};
773 SimpleTensor<T> _reference{};
774};
775
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100776template <typename TensorType, typename AccessorType, typename FunctionType>
777class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture
778{
779public:
780 template <typename...>
781 void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
782 {
783 _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
784 _reference = compute_reference(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias);
785 }
786
787protected:
788 template <typename U>
789 void fill(U &&tensor, int i)
790 {
791 std::uniform_int_distribution<> distribution(-6000, 6000);
792 library->fill(tensor, distribution, i);
793 }
794
795 TensorType compute_target(const TensorShape &shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias)
796 {
797 TensorShape shape_bias(shape[0]);
798
799 // Create tensors
800 TensorType a = create_tensor<TensorType>(shape, DataType::S32, 1);
801 TensorType b = create_tensor<TensorType>(shape_bias, DataType::S32, 1);
802 TensorType c = create_tensor<TensorType>(shape, DataType::QSYMM16, 1);
803
804 // Create and configure function
805 FunctionType output_stage;
806 output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max);
807
808 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
809 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
810
811 // Allocate tensors
812 a.allocator()->allocate();
813 c.allocator()->allocate();
814
815 ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
816 ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
817
818 // Fill tensor
819 fill(AccessorType(a), 0);
820
821 if(add_bias)
822 {
823 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
824
825 // Allocate bias tensor
826 b.allocator()->allocate();
827
828 ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
829
830 // Fill tensor
831 fill(AccessorType(b), 1);
832 }
833
834 // Compute GEMM function
835 output_stage.run();
836 return c;
837 }
838
839 SimpleTensor<int16_t> compute_reference(const TensorShape &shape, int32_t result_fixed_point_multiplier, int32_t result_shift, int32_t min, int32_t max,
840 bool add_bias)
841 {
842 // Create reference
843 TensorShape shape_bias(shape[0]);
844
845 SimpleTensor<int32_t> a{ shape, DataType::S32, 1 };
846 SimpleTensor<int32_t> b{ shape_bias, DataType::S32, 1 };
847
848 // Fill reference
849 fill(a, 0);
850
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000851 const std::vector<int32_t> result_fixed_point_multiplier_vec = { result_fixed_point_multiplier };
852 const std::vector<int32_t> result_shift_vec = { result_shift };
853
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100854 if(add_bias)
855 {
856 // Fill bias
857 fill(b, 1);
858
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000859 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, b, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100860 }
861 else
862 {
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000863 return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int16_t>(a, result_fixed_point_multiplier_vec, result_shift_vec, 0, min, max);
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100864 }
865 }
866
867 TensorType _target{};
868 SimpleTensor<int16_t> _reference{};
869};
870
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000871template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
872class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
873{
874public:
875 template <typename...>
876 void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs,
Sheri Zhang28287af2020-02-25 14:13:54 +0000877 bool interleave_rhs, DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000878 {
879 GEMMLHSMatrixInfo lhs_info;
880 lhs_info.m0 = m0;
881 lhs_info.k0 = k0;
882 lhs_info.v0 = v0;
883 lhs_info.interleave = interleave_lhs;
884 lhs_info.transpose = false;
885
886 GEMMRHSMatrixInfo rhs_info;
887 rhs_info.n0 = n0;
888 rhs_info.k0 = k0;
889 rhs_info.h0 = h0;
890 rhs_info.interleave = interleave_rhs;
891 rhs_info.transpose = true;
892
893 // Set the tensor shapes for LHS and RHS matrices
894 const TensorShape lhs_shape(k, m, batch_size);
895 const TensorShape rhs_shape(n, k, batch_size);
896
Sheri Zhang28287af2020-02-25 14:13:54 +0000897 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
898 _reference = compute_reference(lhs_shape, rhs_shape, data_type);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000899 }
900
901protected:
902 template <typename U>
903 void fill(U &&tensor, int i)
904 {
Sheri Zhang28287af2020-02-25 14:13:54 +0000905 switch(tensor.data_type())
906 {
907 case DataType::QASYMM8:
908 {
909 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
910 std::uniform_int_distribution<> distribution(1, 254);
911 library->fill(tensor, distribution, i);
912 }
913 break;
914 case DataType::QASYMM8_SIGNED:
915 {
916 std::uniform_int_distribution<> distribution(-127, 126);
917 library->fill(tensor, distribution, i);
918 }
919 break;
920 default:
921 ARM_COMPUTE_ERROR("Unsupported data type");
922 }
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000923 }
924
Sheri Zhang28287af2020-02-25 14:13:54 +0000925 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000926 {
927 // Create tensors
Sheri Zhang28287af2020-02-25 14:13:54 +0000928 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
929 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000930 TensorType lhs_reshaped;
931 TensorType rhs_reshaped;
932 TensorType dst;
933
934 const unsigned int M = lhs_shape[1];
935 const unsigned int N = rhs_shape[0];
936 const unsigned int K = lhs_shape[0];
937
938 // The output tensor will be auto-initialized within the function
939
940 // Create and configure function
941 ReshapeLHSFunctionType reshape_lhs;
942 ReshapeRHSFunctionType reshape_rhs;
943 GEMMFunctionType gemm;
944 reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
945 reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
946 gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
947
948 ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
949 ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
950
951 // Allocate tensors
952 lhs.allocator()->allocate();
953 rhs.allocator()->allocate();
954 lhs_reshaped.allocator()->allocate();
955 rhs_reshaped.allocator()->allocate();
956 dst.allocator()->allocate();
957
958 ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
959 ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
960 ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
961 ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
962 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
963
964 // Fill tensors
965 fill(AccessorType(lhs), 0);
966 fill(AccessorType(rhs), 1);
967
968 // Compute GEMM
969 reshape_lhs.run();
970 reshape_rhs.run();
971 gemm.run();
972
973 return dst;
974 }
975
Sheri Zhang28287af2020-02-25 14:13:54 +0000976 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000977 {
978 TensorShape dst_shape = lhs_shape;
979 dst_shape[0] = rhs_shape[0];
980 dst_shape[1] = lhs_shape[1];
981
Sheri Zhang28287af2020-02-25 14:13:54 +0000982 switch(data_type)
983 {
984 case DataType::QASYMM8:
985 {
986 // Create reference
987 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
988 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000989
Sheri Zhang28287af2020-02-25 14:13:54 +0000990 // Fill reference
991 fill(lhs, 0);
992 fill(rhs, 1);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +0000993
Sheri Zhang28287af2020-02-25 14:13:54 +0000994 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
995 }
996 case DataType::QASYMM8_SIGNED:
997 {
998 // Create reference
999 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1000 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1001
1002 // Fill reference
1003 fill(lhs, 0);
1004 fill(rhs, 1);
1005
1006 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1007 }
1008 default:
1009 ARM_COMPUTE_ERROR("Unsupported data type");
1010 }
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001011 }
1012
1013 TensorType _target{};
1014 SimpleTensor<int32_t> _reference{};
1015};
1016
1017template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
1018class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
1019{
1020public:
1021 template <typename...>
1022 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
Sheri Zhang28287af2020-02-25 14:13:54 +00001023 bool interleave_lhs, bool interleave_rhs, DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001024 {
1025 GEMMLHSMatrixInfo lhs_info;
1026 lhs_info.m0 = m0;
1027 lhs_info.k0 = k0;
1028 lhs_info.v0 = v0;
1029 lhs_info.interleave = interleave_lhs;
1030 lhs_info.transpose = false;
1031
1032 GEMMRHSMatrixInfo rhs_info;
1033 rhs_info.n0 = n0;
1034 rhs_info.k0 = k0;
1035 rhs_info.h0 = h0;
1036 rhs_info.interleave = interleave_rhs;
1037 rhs_info.transpose = true;
1038
1039 // In case of GEMM3D, m is the product between m_w and m_h
1040 const unsigned int m = m_w * m_h;
1041
1042 // Set the tensor shapes for LHS and RHS matrices
1043 const TensorShape lhs_shape(k, m, batch_size);
1044 const TensorShape rhs_shape(n, k, batch_size);
1045
Sheri Zhang28287af2020-02-25 14:13:54 +00001046 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
1047 _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001048 }
1049
1050protected:
1051 template <typename U>
1052 void fill(U &&tensor, int i)
1053 {
Sheri Zhang28287af2020-02-25 14:13:54 +00001054 switch(tensor.data_type())
1055 {
1056 case DataType::QASYMM8:
1057 {
1058 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1059 std::uniform_int_distribution<> distribution(1, 254);
1060 library->fill(tensor, distribution, i);
1061 }
1062 break;
1063 case DataType::QASYMM8_SIGNED:
1064 {
1065 std::uniform_int_distribution<> distribution(-127, 126);
1066 library->fill(tensor, distribution, i);
1067 }
1068 break;
1069 default:
1070 ARM_COMPUTE_ERROR("Unsupported data type");
1071 }
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001072 }
1073
Sheri Zhang28287af2020-02-25 14:13:54 +00001074 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h,
1075 DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001076 {
1077 // Create tensors
Sheri Zhang28287af2020-02-25 14:13:54 +00001078 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
1079 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001080 TensorType lhs_reshaped;
1081 TensorType rhs_reshaped;
1082 TensorType dst;
1083
1084 const unsigned int M = lhs_shape[1];
1085 const unsigned int N = rhs_shape[0];
1086 const unsigned int K = lhs_shape[0];
1087
1088 // The output tensor will be auto-initialized within the function
1089
1090 // Create and configure function
1091 ReshapeLHSFunctionType reshape_lhs;
1092 ReshapeRHSFunctionType reshape_rhs;
1093 GEMMFunctionType gemm;
1094 reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
1095 reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
1096 gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
1097
1098 ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1099 ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1100
1101 // Allocate tensors
1102 lhs.allocator()->allocate();
1103 rhs.allocator()->allocate();
1104 lhs_reshaped.allocator()->allocate();
1105 rhs_reshaped.allocator()->allocate();
1106 dst.allocator()->allocate();
1107
1108 ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1109 ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1110 ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
1111 ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
1112 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
1113
1114 // Fill tensors
1115 fill(AccessorType(lhs), 0);
1116 fill(AccessorType(rhs), 1);
1117
1118 // Compute GEMM
1119 reshape_lhs.run();
1120 reshape_rhs.run();
1121 gemm.run();
1122
1123 return dst;
1124 }
1125
Sheri Zhang28287af2020-02-25 14:13:54 +00001126 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001127 {
1128 TensorShape dst_shape = lhs_shape;
1129 dst_shape.set(0, rhs_shape[0]);
1130 dst_shape.set(1, lhs_shape[1] / m_h);
1131 dst_shape.set(2, m_h);
1132 dst_shape.set(3, lhs_shape[2]);
1133
Sheri Zhang28287af2020-02-25 14:13:54 +00001134 switch(data_type)
1135 {
1136 case DataType::QASYMM8:
1137 {
1138 // Create reference
1139 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
1140 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001141
Sheri Zhang28287af2020-02-25 14:13:54 +00001142 // Fill reference
1143 fill(lhs, 0);
1144 fill(rhs, 1);
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001145
Sheri Zhang28287af2020-02-25 14:13:54 +00001146 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1147 }
1148 case DataType::QASYMM8_SIGNED:
1149 {
1150 // Create reference
1151 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1152 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1153
1154 // Fill reference
1155 fill(lhs, 0);
1156 fill(rhs, 1);
1157
1158 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1159 }
1160 default:
1161 ARM_COMPUTE_ERROR("Unsupported data type");
1162 }
Gian Marco Iodicedb63b9c2019-01-17 09:47:04 +00001163 }
1164
1165 TensorType _target{};
1166 SimpleTensor<int32_t> _reference{};
1167};
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001168
1169template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
1170class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
1171{
1172public:
1173 template <typename...>
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001174 void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
1175 unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001176 {
1177 GEMMLHSMatrixInfo lhs_info;
1178 lhs_info.m0 = m0;
1179 lhs_info.k0 = k0;
1180
1181 GEMMRHSMatrixInfo rhs_info;
1182 rhs_info.n0 = n0;
1183 rhs_info.k0 = k0;
1184 rhs_info.h0 = h0;
1185 rhs_info.interleave = interleave_rhs;
1186 rhs_info.transpose = transpose_rhs;
1187
1188 // Set the tensor shapes for LHS and RHS matrices
1189 const TensorShape lhs_shape(k, m, batch_size);
1190 const TensorShape rhs_shape(n, k, batch_size);
1191
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001192 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
1193 _reference = compute_reference(lhs_shape, rhs_shape, data_type);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001194 }
1195
1196protected:
1197 template <typename U>
1198 void fill(U &&tensor, int i)
1199 {
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001200 switch(tensor.data_type())
1201 {
1202 case DataType::QASYMM8:
1203 {
1204 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1205 std::uniform_int_distribution<> distribution(1, 254);
1206 library->fill(tensor, distribution, i);
1207 }
1208 break;
1209 case DataType::QASYMM8_SIGNED:
1210 {
1211 std::uniform_int_distribution<> distribution(-127, 126);
1212 library->fill(tensor, distribution, i);
1213 }
1214 break;
1215 default:
1216 ARM_COMPUTE_ERROR("Unsupported data type");
1217 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001218 }
1219
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001220 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
1221 const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001222 {
1223 // Create tensors
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001224 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
1225 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001226 TensorType rhs_reshaped;
1227 TensorType dst;
1228
1229 const unsigned int M = lhs_shape[1];
1230 const unsigned int N = rhs_shape[0];
1231 const unsigned int K = lhs_shape[0];
1232
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001233 GEMMKernelInfo gemm_info;
1234 gemm_info.m = M;
1235 gemm_info.n = N;
1236 gemm_info.k = K;
1237 gemm_info.lhs_info = lhs_info;
1238 gemm_info.rhs_info = rhs_info;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001239 // The output tensor will be auto-initialized within the function
1240
1241 // Create and configure function
1242 ReshapeRHSFunctionType reshape_rhs;
1243 GEMMFunctionType gemm;
1244 reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001245 gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001246
1247 ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1248 ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1249
1250 // Allocate tensors
1251 lhs.allocator()->allocate();
1252 rhs.allocator()->allocate();
1253 rhs_reshaped.allocator()->allocate();
1254 dst.allocator()->allocate();
1255
1256 ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1257 ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1258 ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
1259 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
1260
1261 // Fill tensors
1262 fill(AccessorType(lhs), 0);
1263 fill(AccessorType(rhs), 1);
1264
1265 // Compute GEMM
1266 reshape_rhs.run();
1267 gemm.run();
1268
1269 return dst;
1270 }
1271
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001272 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001273 {
1274 TensorShape dst_shape = lhs_shape;
1275 dst_shape[0] = rhs_shape[0];
1276 dst_shape[1] = lhs_shape[1];
1277
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001278 if(data_type == DataType::QASYMM8)
1279 {
1280 // Create reference
1281 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
1282 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001283
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001284 // Fill reference
1285 fill(lhs, 0);
1286 fill(rhs, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001287
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001288 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1289 }
1290 else
1291 {
1292 // Create reference
1293 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1294 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1295
1296 // Fill reference
1297 fill(lhs, 0);
1298 fill(rhs, 1);
1299
1300 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1301 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001302 }
1303
1304 TensorType _target{};
1305 SimpleTensor<int32_t> _reference{};
1306};
1307
1308template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
1309class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
1310{
1311public:
1312 template <typename...>
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001313 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
1314 unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001315 {
1316 GEMMLHSMatrixInfo lhs_info;
1317 lhs_info.m0 = m0;
1318 lhs_info.k0 = k0;
1319
1320 GEMMRHSMatrixInfo rhs_info;
1321 rhs_info.n0 = n0;
1322 rhs_info.k0 = k0;
1323 rhs_info.h0 = h0;
1324 rhs_info.interleave = interleave_rhs;
1325 rhs_info.transpose = transpose_rhs;
1326
1327 // In case of GEMM3D, m is the product between m_w and m_h
1328 const unsigned int m = m_w * m_h;
1329
1330 // Set the tensor shapes for LHS and RHS matrices
1331 const TensorShape lhs_shape(k, m, batch_size);
1332 const TensorShape rhs_shape(n, k, batch_size);
1333
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001334 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
1335 _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001336 }
1337
1338protected:
1339 template <typename U>
1340 void fill(U &&tensor, int i)
1341 {
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001342 switch(tensor.data_type())
1343 {
1344 case DataType::QASYMM8:
1345 {
1346 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1347 std::uniform_int_distribution<> distribution(1, 254);
1348 library->fill(tensor, distribution, i);
1349 }
1350 break;
1351 case DataType::QASYMM8_SIGNED:
1352 {
1353 std::uniform_int_distribution<> distribution(-127, 126);
1354 library->fill(tensor, distribution, i);
1355 }
1356 break;
1357 default:
1358 ARM_COMPUTE_ERROR("Unsupported data type");
1359 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001360 }
1361
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001362 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
1363 const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001364 {
1365 // Create tensors
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001366 TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
1367 TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001368 TensorType rhs_reshaped;
1369 TensorType dst;
1370
1371 const unsigned int M = lhs_shape[1];
1372 const unsigned int N = rhs_shape[0];
1373 const unsigned int K = lhs_shape[0];
1374
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001375 GEMMKernelInfo gemm_info;
1376 gemm_info.m = M;
1377 gemm_info.n = N;
1378 gemm_info.k = K;
1379 gemm_info.depth_output_gemm3d = m_h;
1380 gemm_info.lhs_info = lhs_info;
1381 gemm_info.rhs_info = rhs_info;
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001382 // The output tensor will be auto-initialized within the function
1383
1384 // Create and configure function
1385 ReshapeRHSFunctionType reshape_rhs;
1386 GEMMFunctionType gemm;
1387 reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
Michele Di Giorgiob54ba282020-01-14 15:31:55 +00001388 gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001389
1390 ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1391 ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1392
1393 // Allocate tensors
1394 lhs.allocator()->allocate();
1395 rhs.allocator()->allocate();
1396 rhs_reshaped.allocator()->allocate();
1397 dst.allocator()->allocate();
1398
1399 ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1400 ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1401 ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
1402 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
1403
1404 // Fill tensors
1405 fill(AccessorType(lhs), 0);
1406 fill(AccessorType(rhs), 1);
1407
1408 // Compute GEMM
1409 reshape_rhs.run();
1410 gemm.run();
1411
1412 return dst;
1413 }
1414
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001415 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001416 {
1417 TensorShape dst_shape = lhs_shape;
1418 dst_shape.set(0, rhs_shape[0]);
1419 dst_shape.set(1, lhs_shape[1] / m_h);
1420 dst_shape.set(2, m_h);
1421 dst_shape.set(3, lhs_shape[2]);
1422
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001423 if(data_type == DataType::QASYMM8)
1424 {
1425 // Create reference
1426 SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
1427 SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001428
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001429 // Fill reference
1430 fill(lhs, 0);
1431 fill(rhs, 1);
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001432
Michele Di Giorgiof9179d32019-11-27 16:17:30 +00001433 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1434 }
1435 else
1436 {
1437 // Create reference
1438 SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
1439 SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
1440
1441 // Fill reference
1442 fill(lhs, 0);
1443 fill(rhs, 1);
1444
1445 return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
1446 }
Gian Marco Iodice62251f72019-03-11 16:07:12 +00001447 }
1448
1449 TensorType _target{};
1450 SimpleTensor<int32_t> _reference{};
1451};
Gian Marco Iodicee7510622019-06-03 17:28:17 +01001452
1453template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
1454class GEMMLowpMatrixMultiplyNativeValidationFixture : public framework::Fixture
1455{
1456public:
1457 template <typename...>
1458 void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
1459 {
1460 GEMMLHSMatrixInfo lhs_info;
1461 lhs_info.m0 = m0;
1462 lhs_info.k0 = k0;
1463
1464 GEMMRHSMatrixInfo rhs_info;
1465 rhs_info.n0 = n0;
1466 rhs_info.k0 = k0;
1467
1468 // Set the tensor shapes for LHS and RHS matrices
1469 const TensorShape lhs_shape(k, m, batch_size);
1470 const TensorShape rhs_shape(n, k, batch_size);
1471
1472 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
1473 _reference = compute_reference(lhs_shape, rhs_shape);
1474 }
1475
1476protected:
1477 template <typename U>
1478 void fill(U &&tensor, int i)
1479 {
1480 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1481 std::uniform_int_distribution<> distribution(1, 254);
1482 library->fill(tensor, distribution, i);
1483 }
1484
1485 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
1486 {
1487 // Create tensors
1488 TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
1489 TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
1490 TensorType dst;
1491
1492 const unsigned int M = lhs_shape[1];
1493 const unsigned int N = rhs_shape[0];
1494 const unsigned int K = lhs_shape[0];
1495
1496 // The output tensor will be auto-initialized within the function
1497
1498 // Create and configure function
1499 GEMMFunctionType gemm;
1500 gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
1501
1502 ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1503 ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1504
1505 // Allocate tensors
1506 lhs.allocator()->allocate();
1507 rhs.allocator()->allocate();
1508 dst.allocator()->allocate();
1509
1510 ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1511 ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1512 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
1513
1514 // Fill tensors
1515 fill(AccessorType(lhs), 0);
1516 fill(AccessorType(rhs), 1);
1517
1518 // Compute GEMM
1519 gemm.run();
1520
1521 return dst;
1522 }
1523
1524 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
1525 {
1526 TensorShape dst_shape = lhs_shape;
1527 dst_shape[0] = rhs_shape[0];
1528 dst_shape[1] = lhs_shape[1];
1529
1530 // Create reference
1531 SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
1532 SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
1533
1534 // Fill reference
1535 fill(lhs, 0);
1536 fill(rhs, 1);
1537
1538 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1539 }
1540
1541 TensorType _target{};
1542 SimpleTensor<int32_t> _reference{};
1543};
1544
1545template <typename TensorType, typename AccessorType, typename GEMMFunctionType>
1546class GEMMLowpMatrixMultiplyNative3DValidationFixture : public framework::Fixture
1547{
1548public:
1549 template <typename...>
1550 void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0)
1551 {
1552 GEMMLHSMatrixInfo lhs_info;
1553 lhs_info.m0 = m0;
1554 lhs_info.k0 = k0;
1555
1556 GEMMRHSMatrixInfo rhs_info;
1557 rhs_info.n0 = n0;
1558 rhs_info.k0 = k0;
1559
1560 // In case of GEMM3D, m is the product between m_w and m_h
1561 const unsigned int m = m_w * m_h;
1562
1563 // Set the tensor shapes for LHS and RHS matrices
1564 const TensorShape lhs_shape(k, m, batch_size);
1565 const TensorShape rhs_shape(n, k, batch_size);
1566
1567 _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
1568 _reference = compute_reference(lhs_shape, rhs_shape, m_h);
1569 }
1570
1571protected:
1572 template <typename U>
1573 void fill(U &&tensor, int i)
1574 {
1575 // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
1576 std::uniform_int_distribution<> distribution(1, 254);
1577 library->fill(tensor, distribution, i);
1578 }
1579
1580 TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
1581 {
1582 // Create tensors
1583 TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
1584 TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
1585 TensorType dst;
1586
1587 const unsigned int M = lhs_shape[1];
1588 const unsigned int N = rhs_shape[0];
1589 const unsigned int K = lhs_shape[0];
1590
1591 // The output tensor will be auto-initialized within the function
1592
1593 // Create and configure function
1594 GEMMFunctionType gemm;
1595 gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
1596
1597 ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1598 ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1599
1600 // Allocate tensors
1601 lhs.allocator()->allocate();
1602 rhs.allocator()->allocate();
1603 dst.allocator()->allocate();
1604
1605 ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1606 ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
1607 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
1608
1609 // Fill tensors
1610 fill(AccessorType(lhs), 0);
1611 fill(AccessorType(rhs), 1);
1612
1613 // Compute GEMM
1614 gemm.run();
1615
1616 return dst;
1617 }
1618
1619 SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
1620 {
1621 TensorShape dst_shape = lhs_shape;
1622 dst_shape.set(0, rhs_shape[0]);
1623 dst_shape.set(1, lhs_shape[1] / m_h);
1624 dst_shape.set(2, m_h);
1625 dst_shape.set(3, lhs_shape[2]);
1626
1627 // Create reference
1628 SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
1629 SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
1630
1631 // Fill reference
1632 fill(lhs, 0);
1633 fill(rhs, 1);
1634
1635 return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
1636 }
1637
1638 TensorType _target{};
1639 SimpleTensor<int32_t> _reference{};
1640};
Pablo Tello299025a2017-09-29 11:30:12 +01001641} // namespace validation
1642} // namespace test
1643} // namespace arm_compute
George Wort2d7e6832019-02-22 16:37:41 +00001644#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */