/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

41namespace arm_compute
42{
43namespace test
44{
45namespace validation
46{
47TEST_SUITE(CL)
48TEST_SUITE(GEMMLowp)
49
50TEST_SUITE(MatrixMultiplyCore)
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +010051
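// CLGEMMLowpMatrixMultiplyCore multiplies two QASYMM8 matrices and accumulates into S32. Loosely,
// it computes dst[i][j] = sum_k (a[i][k] + a_offset) * (b[k][j] + b_offset); see the function
// documentation for the exact handling of the quantization offsets. The fixtures below compare
// its result against a naive reference implementation.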
using CLGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset(),
               shape_a, shape_b, shape_c, a_offset, b_offset)
{
    // Create tensors
    CLTensor a = create_tensor<CLTensor>(shape_a, DataType::QASYMM8);
    CLTensor b = create_tensor<CLTensor>(shape_b, DataType::QASYMM8);
    CLTensor c = create_tensor<CLTensor>(shape_c, DataType::S32);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    // TODO (giaiod01) COMPMID-1672 - Extend the test to validate bias addition in the offset contribution
    gemmlowp_mm.configure(&a, &b, nullptr, &c);
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

using CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
TEST_SUITE(FusedOffsetOutput)
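// In the fused variant the quantize-down output stage runs inside CLGEMMLowpMatrixMultiplyCore
// itself, so the function produces QASYMM8 output directly instead of raw S32 accumulators.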
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallGEMMLowpFusedOffsetOutputDataset(), framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeGEMMLowpFusedOffsetOutputDataset(), framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // FusedOffsetOutput

TEST_SUITE(Output3D)
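// Output3D covers the case where the 2D GEMM result is reinterpreted as a 3D tensor; the
// fixture's last two template arguments (reinterpret_input_as_3d = false,
// reinterpret_output_as_3d = true, named here by assumption) select that behaviour.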
using CLGEMMLowpMatrixMultiplyCoreOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // Output3D

TEST_SUITE(InputOutput3D)
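// InputOutput3D exercises the same reinterpretation with both the input and the output treated
// as 3D (both template flags set to true below).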
using CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, true, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpInputOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpInputOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // InputOutput3D
TEST_SUITE_END() // MatrixMultiplyCore

TEST_SUITE(OutputStage)
TEST_SUITE(QuantizeDownInt32ToUint8Scale)

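// This output stage requantizes the S32 accumulators of a GEMMLowp to QASYMM8 using an integer
// multiplier and shift; per its documentation the result is, roughly,
// ((input[i][k] (+ bias[k]) + result_offset) * result_mult_int) >> result_shift, clamped to
// [min, max] for the bounded-ReLU cases (the non-ReLU datasets below pass min == max == 0).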
const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                      * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0)
                                                      * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                           * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 173)
                                                           * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8Scale>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases),
               shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max);

    // Validate the valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate the valid region of the bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
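// The fixed-point variant replaces the plain integer multiplier with a gemmlowp-style Q0.31
// multiplier. A minimal sketch of the downscale, assuming gemmlowp rounding-doubling-high-multiply
// semantics (rounding nudges and saturation elided; the helper name is illustrative, not part of
// the API):
//
//   int32_t requantize(int32_t acc, int32_t result_fixedpoint_multiplier, int32_t result_shift,
//                      int32_t result_offset_after_shift)
//   {
//       // High 32 bits of the doubled 64-bit product, i.e. acc * multiplier in Q0.31
//       const int32_t high = int32_t((int64_t(acc) * result_fixedpoint_multiplier + (int64_t(1) << 30)) >> 31);
//       return (high >> result_shift) + result_offset_after_shift; // then clamped to [min, max]
//   }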
const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0)
                                                                    * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174)
                                                                         * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate the valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate the valid region of the bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)
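// Same fixed-point downscale as above, but producing QASYMM8_SIGNED, so the bounded-ReLU ranges
// below lie inside the int8 interval [-128, 127].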
const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                   * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0)
                                                                   * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                        * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -128, -126) * framework::dataset::make("max", 110, 112)
                                                                        * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate the valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate the valid region of the bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)

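// The QSYMM16 variant is symmetric, so there is no result offset parameter. A negative
// result_shift shifts the result left instead of right, which is how effective multipliers
// greater than 1 are expressed; the MultGreater1 suites below exercise this negative-shift path.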
const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases = framework::dataset::make("result_fixedpoint_multiplier", 1073741823, 1073741825)
                                                                               * framework::dataset::make("result_shift", -3, -2)
                                                                               * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                                    * framework::dataset::make("result_shift", -3, -1)
                                                                                    * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

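// The Validate case feeds CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate() a mix
// of valid and deliberately broken configurations (min/max outside the QSYMM16 range, wrong
// output data type) and checks the returned Status against the expected outcome.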
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Min",{ -205,
                                     -60000,
                                     -180,
                                   })),
    framework::dataset::make("Max",{ 205,
                                     60000,
                                     180,
                                   })),
    framework::dataset::make("Expected", { true, false, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Check the validate() status against the expected result
    Status status = CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(true),
                                                                                  &b_info.clone()->set_is_resizable(true),
                                                                                  &output_info.clone()->set_is_resizable(true),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // NoRelu
TEST_SUITE(BoundedReLu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL,
                       combine(datasets::SmallShapes(), quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute