/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

41namespace arm_compute
42{
43namespace test
44{
45namespace validation
46{
Manuel Bottini959c26d2019-12-02 16:22:35 +000047namespace
48{
49constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
50}
TEST_SUITE(CL)
TEST_SUITE(GEMMLowp)

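// CLGEMMLowpMatrixMultiplyCore multiplies two quantized (QASYMM8) matrices,
// taking their quantization offsets into account, and produces raw S32
// accumulators. Requantizing those accumulators back to an 8/16-bit output is
// the job of the output stages exercised in the OutputStage suite below.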
TEST_SUITE(MatrixMultiplyCore)
using CLGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset(),
               shape_a, shape_b, shape_c, a_offset, b_offset)
{
    // Create tensors
    CLTensor a = create_tensor<CLTensor>(shape_a, DataType::QASYMM8);
    CLTensor b = create_tensor<CLTensor>(shape_b, DataType::QASYMM8);
    CLTensor c = create_tensor<CLTensor>(shape_c, DataType::S32);

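    // QASYMM8 uses the affine mapping real = scale * (q - offset); with a
    // scale of 1.0f / 255 the full 8-bit range [0, 255] covers a unit
    // interval, shifted by the per-tensor offset taken from the dataset.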
    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    // TODO (giaiod01) COMPMID-1672 - Extend the test to validate the bias addition in the offset contribution
    gemmlowp_mm.configure(&a, &b, nullptr, &c);
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(FusedOffsetOutput)
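// In the fused-offset-output tests the offset contribution and the output
// stage are folded into the core function itself, so it writes a quantized
// 8-bit tensor directly instead of raw S32 accumulators. The fused path may
// round differently from the reference, hence the one-step tolerance_quant.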
TEST_SUITE(QASYMM8)
using CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputUint8Fixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputUint8Fixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputUint8Fixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
using CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputInt8Fixture =
    GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, false, int8_t, int8_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputInt8Fixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputInt8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // FusedOffsetOutput

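// The two trailing boolean template parameters of the validation fixture
// select whether the input and/or the output is reinterpreted as a 3D tensor
// (assumed here to map to the fixture's reinterpret_input_as_3d /
// reinterpret_output_as_3d flags): <..., false, true> exercises a 3D output
// only, <..., true, true> a 3D input and output.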
TEST_SUITE(Output3D)
using CLGEMMLowpMatrixMultiplyCoreOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // Output3D

TEST_SUITE(InputOutput3D)
using CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, true, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpInputOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpInputOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // InputOutput3D
TEST_SUITE_END() // MatrixMultiplyCore

TEST_SUITE(OutputStage)

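// The output stages requantize the S32 accumulators produced by the core down
// to QASYMM8, QASYMM8_SIGNED or QSYMM16. Three flavours are covered below: an
// integer scale (multiply, shift, offset), a fixed-point multiplier (rounding
// high multiply plus shift), and a real (float) multiplier. Each stage can
// optionally add a 1D S32 bias first and clamps the result to [min, max].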
TEST_SUITE(QuantizeDownInt32Scale)

TEST_SUITE(QASYMM8)

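// A note on the dataset algebra used below: framework::dataset::make("name", a, b)
// builds an integer range starting at a and stopping before b,
// make("name", { ... }) an explicit value list, and operator* the cartesian
// product of two datasets, so every combination of parameters becomes one
// test case.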
const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                      * framework::dataset::make("result_shift", 2, 3)
                                                      * framework::dataset::make("min", 0) * framework::dataset::make("max", 255)
                                                      * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                           * framework::dataset::make("result_shift", 2, 3)
                                                           * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 173)
                                                           * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage>;

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)

const auto quantize_down_int32_to_int8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                     * framework::dataset::make("result_shift", 2, 3)
                                                     * framework::dataset::make("min", -128) * framework::dataset::make("max", 127)
                                                     * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                          * framework::dataset::make("result_shift", 2, 3)
                                                          * framework::dataset::make("min", -100, -98) * framework::dataset::make("max", 71, 73)
                                                          * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage>;

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // QuantizeDownInt32Scale

TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
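// The fixed-point stage scales by a Q0.31 multiplier instead of a plain
// integer: out ~= (round(acc * M / 2^31) >> result_shift) + offset, clamped to
// [min, max]; e.g. M = 254601600 corresponds to a real scale of roughly 0.119.
// A minimal sketch of that arithmetic (assuming a non-negative result_shift
// and ignoring the kernel's exact rounding/saturation details):
//
//   int32_t requantize(int32_t acc, int32_t m, int32_t shift, int32_t offset)
//   {
//       const int64_t mul = ((int64_t)acc * m + (1LL << 30)) >> 31; // round(acc * m / 2^31)
//       const int32_t res = (int32_t)(mul >> shift) + offset;       // scale down, re-centre
//       return std::min(std::max(res, 0), 255);                     // clamp to the QASYMM8 range
//   }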
const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                    * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                    * framework::dataset::make("min", 0) * framework::dataset::make("max", 255)
                                                                    * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                         * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                         * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174)
                                                                         * framework::dataset::make("addBias", { false, true });
using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region of bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)
const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                   * framework::dataset::make("result_shift", 1, 2)
                                                                   * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                   * framework::dataset::make("min", -128) * framework::dataset::make("max", 127)
                                                                   * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                        * framework::dataset::make("result_shift", 1, 2)
                                                                        * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                        * framework::dataset::make("min", -128, -126) * framework::dataset::make("max", 110, 112)
                                                                        * framework::dataset::make("addBias", { false, true });
using CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region of bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)

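// QSYMM16 is a symmetric quantized format, so unlike the 8-bit stages above
// there is no result_offset_after_shift parameter. Note the negative
// result_shift values in the MultGreater1 cases below: a negative shift acts
// as a left shift, making the effective scale greater than one
// (e.g. 2^30 / 2^31 * 2^3 = 4).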
const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                    * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767)
                                                                    * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                         * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3)
                                                                         * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases = framework::dataset::make("result_fixedpoint_multiplier", 1073741823, 1073741825)
                                                                               * framework::dataset::make("result_shift", -3, -2)
                                                                               * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767)
                                                                               * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                                    * framework::dataset::make("result_shift", -3, -1)
                                                                                    * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3)
                                                                                    * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

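// The zipped lists below line up element-wise into one validation case per
// position: the first tuple is a valid QSYMM16 configuration (Expected = true),
// the second must be rejected because its output is S32 rather than QSYMM16
// and its width (20) does not match the input (21) (Expected = false).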
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Min",{ -205,
                                     -180,
                                   })),
    framework::dataset::make("Max",{ 205,
                                     180,
                                   })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Validate the configuration statically (no tensors are allocated)
    Status status = CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(true),
                                                                                  &b_info.clone()->set_is_resizable(true),
                                                                                  &output_info.clone()->set_is_resizable(true),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // NoRelu
TEST_SUITE(BoundedReLu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint

TEST_SUITE(QuantizeDownInt32ScaleByFloat)

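// The float variant applies a real-valued multiplier directly, roughly
// out = clamp(round((acc + bias) * result_real_multiplier) + result_offset):
// with result_real_multiplier = 0.33f, an accumulator of 100 and offset 2
// would land on round(33.0f) + 2 = 35 before clamping.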
TEST_SUITE(QASYMM8)
using CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture =
    GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage, uint8_t>;

FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(framework::dataset::make("DataType", DataType::QASYMM8),
                                                                       datasets::TinyShapes()),
                                                               framework::dataset::make("result_real_multiplier", 0.33f)),
                                                       framework::dataset::make("result_offset", 2, 3)),
                                               framework::dataset::make("min", 0)),
                                       framework::dataset::make("max", 255)),
                               framework::dataset::make("addBias", { false, true })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
using CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture_Signed =
    GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage, int8_t>;
FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture_Signed, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED),
                                                                       datasets::TinyShapes()),
                                                               framework::dataset::make("result_real_multiplier", 0.33f)),
                                                       framework::dataset::make("result_offset", 2, 3)),
                                               framework::dataset::make("min", -128)),
                                       framework::dataset::make("max", 127)),
                               framework::dataset::make("addBias", { false, true })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // QuantizeDownInt32ScaleByFloat

TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute