/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(GEMMLowp)
TEST_SUITE(MatrixMultiplyCore)
using NEGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;

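// The Configuration case below only builds the operator: it creates unallocated QASYMM8 inputs and an S32
// output, checks that they are still resizable, configures NEGEMMLowpMatrixMultiplyCore and verifies that
// configuration requests no tensor padding. For reference, typical usage outside the test framework looks
// roughly like this (illustrative sketch only, shapes and quantization chosen arbitrarily; the fixtures
// below handle allocation and filling themselves):
//
//   Tensor a{}, b{}, dst{};
//   a.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255, 10)));
//   b.allocator()->init(TensorInfo(TensorShape(8U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255, 10)));
//   dst.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::S32));
//   NEGEMMLowpMatrixMultiplyCore gemmlowp;
//   gemmlowp.configure(&a, &b, nullptr, &dst); // third argument is the optional bias
//   a.allocator()->allocate();
//   b.allocator()->allocate();
//   dst.allocator()->allocate();
//   gemmlowp.run();                            // fill a and b before run()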
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
               shape_a, shape_b, shape_c, a_offset, b_offset)
{
    // Create tensors
    Tensor a = create_tensor<Tensor>(shape_a, DataType::QASYMM8);
    Tensor b = create_tensor<Tensor>(shape_b, DataType::QASYMM8);
    Tensor c = create_tensor<Tensor>(shape_c, DataType::S32);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    gemmlowp_mm.configure(&a, &b, nullptr, &c);

    // Validate padding is zero
    validate(a.info()->padding(), PaddingSize());
    validate(b.info()->padding(), PaddingSize());
    validate(c.info()->padding(), PaddingSize());
}

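// Validate exercises only the static NEGEMMLowpMatrixMultiplyCore::validate() check: each row pairs
// hand-written TensorInfo descriptors (a mismatching data type, incompatible matrix dimensions, and two
// well-formed combinations) with the Status outcome the check is expected to return. Nothing is allocated
// or executed.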
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),                                    // Mismatching data type
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                              TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
                                            }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                              TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                              TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                              TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                              TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                            })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                              TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                              TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                              TensorInfo(TensorShape(8U, 11U), 1, DataType::S32),
                                              TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
                                            })),
    framework::dataset::make("Expected", { true, false, false, false, true })),
    a_info, b_info, output_info, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
                                                           &b_info.clone()->set_is_resizable(false),
                                                           nullptr,
                                                           &output_info.clone()->set_is_resizable(false));
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

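// The fixture cases run the full operator: the fixture fills pseudo-random QASYMM8 inputs, executes the
// Neon function and validate() compares the S32 result against the C++ reference GEMMLowp implementation.
// RunSmall is part of every test run, RunLarge only of the nightly configuration.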
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

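// FusedOffsetOutput covers the path where the requantization stage is fused into the core function: the
// fixture configures the operator with an output stage (passed through GEMMInfo), so it writes quantized
// QASYMM8 output directly instead of raw S32 accumulators.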
using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
TEST_SUITE(FusedOffsetOutput)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // FusedOffsetOutput
TEST_SUITE_END() // MatrixMultiplyCore

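// The OutputStage suites test the standalone quantize-down functions: they take the S32 accumulators
// produced by a GEMMLowp core, optionally add a 1-D bias vector (the "InputBInfo" tensors below), and
// produce the final quantized output in QASYMM8, QASYMM8_SIGNED or QSYMM16.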
TEST_SUITE(OutputStage)

TEST_SUITE(QuantizeDownInt32Scale)

TEST_SUITE(QASYMM8)

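// The *_cases constants below are parameter grids: framework::dataset::make("name", a, b) produces a small
// range of values and operator* takes the Cartesian product, so every combination of result_offset,
// result_mult_int, result_shift, clamp bounds and addBias is exercised. For this integer-scale output stage
// the per-element computation is roughly (sketch, not the exact kernel code):
//
//   dst = clamp(((src + bias + result_offset) * result_mult_int) >> result_shift, min, max)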
const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                      * framework::dataset::make("result_shift", 2, 3)
                                                      * framework::dataset::make("min", 0) * framework::dataset::make("max", 255) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                           * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                            }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                              TensorInfo(TensorShape(20U), 1, DataType::S32),
                                            })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                            })),
    framework::dataset::make("Min", { 0,
                                      13,
                                    })),
    framework::dataset::make("Max", { 205,
                                      180,
                                    })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
    output_stage.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
    output_stage.gemmlowp_min_bound      = min;
    output_stage.gemmlowp_max_bound      = max;
    output_stage.output_data_type        = DataType::QASYMM8;

    // Lock tensors
    Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false),
                                                    &b_info.clone()->set_is_resizable(false),
                                                    &output_info.clone()->set_is_resizable(false),
                                                    output_stage);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

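// NoPaddingAdded configures NEGEMMLowpOutputStage directly on freshly created (unallocated) tensors and
// checks that configure() requests no padding on the input, the bias vector or the output.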
TEST_CASE(NoPaddingAdded, framework::DatasetMode::PRECOMMIT)
{
    Tensor input1 = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::S32);
    Tensor input2 = create_tensor<Tensor>(TensorShape(21U, 1U), DataType::S32);
    Tensor output = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::QASYMM8);

    GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
    output_stage.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
    output_stage.gemmlowp_min_bound      = 0;
    output_stage.gemmlowp_max_bound      = 205;
    output_stage.output_data_type        = DataType::QASYMM8;

    NEGEMMLowpOutputStage f;
    f.configure(&input1, &input2, &output, output_stage);

    // Validate padding is zero
    validate(input1.info()->padding(), PaddingSize());
    validate(input2.info()->padding(), PaddingSize());
    validate(output.info()->padding(), PaddingSize());
}

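// The fixture cases execute the output stage on small shapes. The plain suite keeps the full [0, 255]
// output range, while the BoundedReLu suite narrows the min/max clamp window, which is how a fused bounded
// activation appears at this stage.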
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)

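// QASYMM8_SIGNED mirrors the QASYMM8 suite above with the same integer-multiplier scaling path, but the
// result is requantized to signed 8-bit, so the ReLU clamp bounds move into the negative range.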
const auto quantize_down_int32_to_int8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                     * framework::dataset::make("result_shift", 2, 3)
                                                     * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                          * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", -100, -98) * framework::dataset::make("max", 71, 74) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                            }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                              TensorInfo(TensorShape(21U), 1, DataType::S32),
                                              TensorInfo(TensorShape(20U), 1, DataType::S32),
                                            })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                            })),
    framework::dataset::make("Min", { -10,
                                      -200,
                                      -113,
                                    })),
    framework::dataset::make("Max", { 105,
                                      300,
                                      -18,
                                    })),
    framework::dataset::make("Expected", { true, false, false })),
    a_info, b_info, output_info, min, max, expected)
{
    GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
    output_stage.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
    output_stage.gemmlowp_min_bound      = min;
    output_stage.gemmlowp_max_bound      = max;
    output_stage.output_data_type        = DataType::QASYMM8_SIGNED;

    // Lock tensors
    Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false),
                                                    &b_info.clone()->set_is_resizable(false),
                                                    &output_info.clone()->set_is_resizable(false),
                                                    output_stage);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // QuantizeDownInt32Scale

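// The *ScaleByFixedPoint suites use the gemmlowp-style requantization: the scale is expressed as a 32-bit
// fixed-point multiplier plus a rounding shift instead of a plain integer multiplier (254601600 / 2^31 is
// roughly 0.12). Per element the computation is approximately (sketch only):
//
//   tmp = rounding_doubling_high_mul(src + bias, result_fixedpoint_multiplier)
//   dst = clamp(rounding_shift_right(tmp, result_shift) + result_offset_after_shift, min, max)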
TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 255) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;

using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                            }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                              TensorInfo(TensorShape(20U), 1, DataType::S32),
                                            })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                            })),
    framework::dataset::make("Min", { 0,
                                      13,
                                    })),
    framework::dataset::make("Max", { 205,
                                      180,
                                    })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                  &b_info.clone()->set_is_resizable(false),
                                                                                  &output_info.clone()->set_is_resizable(false),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint

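// Same fixed-point scheme as above, but requantizing down to QASYMM8_SIGNED. The Validate rows add an F32
// input to confirm that accumulators other than S32 are rejected.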
TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                   * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -128) * framework::dataset::make("max", 128) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                        * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::F32), // Invalid input data type
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
                                            }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                              TensorInfo(TensorShape(20U), 1, DataType::S32),
                                              TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                            })),
    framework::dataset::make("Min", { -110,
                                      -113,
                                      -113,
                                    })),
    framework::dataset::make("Max", { 87,
                                      97,
                                      97,
                                    })),
    framework::dataset::make("Expected", { false, false, true })),
    a_info, b_info, output_info, min, max, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                 &b_info.clone()->set_is_resizable(false),
                                                                                 &output_info.clone()->set_is_resizable(false),
                                                                                 min,
                                                                                 max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint

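// QSYMM16 output is symmetric, so there is no result_offset_after_shift parameter here. The MultGreater1
// sub-suites use negative result_shift values, i.e. an effective multiplier greater than one (a left
// shift), while MultSmallerEq1 covers the usual right-shift case.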
TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases = framework::dataset::make("result_fixedpoint_multiplier", 1073741823, 1073741825)
                                                                               * framework::dataset::make("result_shift", -3, -2)
                                                                               * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                                    * framework::dataset::make("result_shift", -3, -1)
                                                                                    * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                            }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                              TensorInfo(TensorShape(20U), 1, DataType::S32),
                                            })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                            })),
    framework::dataset::make("Min", { -205,
                                      -180,
                                    })),
    framework::dataset::make("Max", { 205,
                                      180,
                                    })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                  &b_info.clone()->set_is_resizable(false),
                                                                                  &output_info.clone()->set_is_resizable(false),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // NoRelu
TEST_SUITE(BoundedReLu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
} // namespace arm_compute