/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
} // namespace
TEST_SUITE(CL)
TEST_SUITE(GEMMLowp)

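// Note: CLGEMMLowpMatrixMultiplyCore accumulates the low-precision product in int32. As a
// rough sketch of the documented behaviour (not the literal kernel code), each output
// element is
//   dst[y][x] = sum_k (int32(a[y][k]) + a_offset) * (int32(b[k][x]) + b_offset)
// which is what the reference implementation used by the fixtures below validates against.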
TEST_SUITE(MatrixMultiplyCore)
using CLGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset(),
               shape_a, shape_b, shape_c, a_offset, b_offset)
{
    // Create tensors
    CLTensor a = create_tensor<CLTensor>(shape_a, DataType::QASYMM8);
    CLTensor b = create_tensor<CLTensor>(shape_b, DataType::QASYMM8);
    CLTensor c = create_tensor<CLTensor>(shape_c, DataType::S32);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    // TODO (giaiod01) COMPMID-1672 - Extending the test to validate add bias in offset contribution
    gemmlowp_mm.configure(&a, &b, nullptr, &c);
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(FusedOffsetOutput)
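// These fixtures fuse the requantization (output stage) into the core function, so the
// result is compared directly in the quantized domain; tolerance_quant lets the
// implementation differ from the reference by one quantum to absorb rounding differences.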
TEST_SUITE(QASYMM8)
using CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputUint8Fixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputUint8Fixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputUint8Fixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
using CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputInt8Fixture =
    GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, false, int8_t, int8_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputInt8Fixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputInt8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // FusedOffsetOutput

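// The two trailing boolean template parameters of the validation fixture are assumed to map
// to reinterpret_input_as_3d / reinterpret_output_as_3d: Output3D uses <false, true> and
// InputOutput3D uses <true, true>.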
TEST_SUITE(Output3D)
using CLGEMMLowpMatrixMultiplyCoreOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // Output3D

TEST_SUITE(InputOutput3D)
using CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, true, true>;
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpInputOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpInputOutput3DDataset())
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // InputOutput3D
TEST_SUITE_END() // MatrixMultiplyCore

TEST_SUITE(OutputStage)
TEST_SUITE(QuantizeDownInt32ToUint8Scale)
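// This stage requantizes int32 accumulators to QASYMM8 using an integer multiply and shift.
// Sketch of the documented computation (the bias term and the [min, max] clamp are optional):
//   result = clamp(((input + bias + result_offset) * result_mult_int) >> result_shift, min, max)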

const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                      * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0)
                                                      * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                           * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 173)
                                                           * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8Scale>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases),
               shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max);

    // Validate valid region input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
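// Fixed-point variant: the accumulator is scaled by a rounding doubling high multiply with
// result_fixedpoint_multiplier instead of a plain integer multiply. Sketch of the documented
// computation (fp_mul(x, y) is the nearest integer to x * y / 2^31):
//   result = clamp((fp_mul(input + bias, result_fixedpoint_multiplier) >> result_shift)
//                  + result_offset_after_shift, min, max)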
const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                    * framework::dataset::make("result_shift", 1, 2) * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                    * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                         * framework::dataset::make("result_shift", 1, 2) * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                         * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate valid region input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)
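// Same fixed-point requantization as the suite above, but producing QASYMM8_SIGNED output,
// so the min/max bounds below range over int8 values.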
const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                   * framework::dataset::make("result_shift", 1, 2) * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                   * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                        * framework::dataset::make("result_shift", 1, 2) * framework::dataset::make("result_offset_after_shift", 2, 3)
                                                                        * framework::dataset::make("min", -128, -126) * framework::dataset::make("max", 110, 112) * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate valid region input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint
TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)
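// Symmetric 16-bit variant: there is no offset term and the result saturates to QSYMM16.
// A negative result_shift (the MultGreater1 cases below) is understood as a left shift,
// i.e. an effective multiplier greater than 1.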

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                    * framework::dataset::make("result_shift", 1, 2) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0)
                                                                    * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                         * framework::dataset::make("result_shift", 1, 2) * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3)
                                                                         * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases = framework::dataset::make("result_fixedpoint_multiplier", 1073741823, 1073741825)
                                                                               * framework::dataset::make("result_shift", -3, -2) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0)
                                                                               * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602)
                                                                                    * framework::dataset::make("result_shift", -3, -1) * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3)
                                                                                    * framework::dataset::make("addBias", { false, true });

using CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Min",{ -205,
                                     -60000,
                                     -180,
                                   })),
    framework::dataset::make("Max",{ 205,
                                     60000,
                                     180,
                                   })),
    framework::dataset::make("Expected", { true, false, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Check that the static validate() accepts/rejects each configuration as expected
    Status status = CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(true),
                                                                                  &b_info.clone()->set_is_resizable(true),
                                                                                  &output_info.clone()->set_is_resizable(true),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // NoRelu
TEST_SUITE(BoundedReLu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute