/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/CL/CLTensor.h"
26#include "arm_compute/runtime/CL/CLTensorAllocator.h"
27#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
28#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
29#include "tests/CL/CLAccessor.h"
30#include "tests/PaddingCalculator.h"
George Wort2d7e6832019-02-22 16:37:41 +000031#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
Gian Marco05288a22017-11-21 10:57:50 +000032#include "tests/datasets/LargeGEMMLowpDataset.h"
33#include "tests/datasets/ShapeDatasets.h"
34#include "tests/datasets/SmallGEMMLowpDataset.h"
35#include "tests/framework/Asserts.h"
36#include "tests/framework/Macros.h"
37#include "tests/framework/datasets/Datasets.h"
38#include "tests/validation/Validation.h"
39#include "tests/validation/fixtures/GEMMLowpFixture.h"
40
41namespace arm_compute
42{
43namespace test
44{
45namespace validation
46{
47TEST_SUITE(CL)
48TEST_SUITE(GEMMLowp)
49
50TEST_SUITE(MatrixMultiplyCore)
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +010051
Gian Marco05288a22017-11-21 10:57:50 +000052using CLGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
53
Michalis Spyrou80943252019-01-10 17:19:50 +000054DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset(),
Gian Marco05288a22017-11-21 10:57:50 +000055 shape_a, shape_b, shape_c, a_offset, b_offset)
56{
57 // Create tensors
58 CLTensor a = create_tensor<CLTensor>(shape_a, DataType::QASYMM8);
59 CLTensor b = create_tensor<CLTensor>(shape_b, DataType::QASYMM8);
60 CLTensor c = create_tensor<CLTensor>(shape_c, DataType::S32);
61
62 a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
63 b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
64
65 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
66 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
67 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
68
69 // Create and configure function
70 CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
Gian Marco Iodice4b908652018-10-18 10:21:02 +010071 // TODO (giaiod01) COMPMID-1672 - Extending the test to validate add bias in offset contribution
72 gemmlowp_mm.configure(&a, &b, nullptr, &c);
Gian Marco05288a22017-11-21 10:57:50 +000073}
74
75FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
76{
77 // Validate output
78 validate(CLAccessor(_target), _reference);
79}
80
81FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
82{
83 // Validate output
84 validate(CLAccessor(_target), _reference);
85}
86
George Wort2d7e6832019-02-22 16:37:41 +000087using CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
88TEST_SUITE(FusedOffsetOutput)
89FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpFusedOffsetOutputDataset())
90{
91 // Validate output
92 validate(CLAccessor(_target), _reference);
93}
94
95FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpFusedOffsetOutputDataset())
96{
97 // Validate output
98 validate(CLAccessor(_target), _reference);
99}
100TEST_SUITE_END() // FusedOffsetOutput
101
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100102TEST_SUITE(Output3D)
103using CLGEMMLowpMatrixMultiplyCoreOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, true>;
104FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpOutput3DDataset())
105{
106 // Validate output
107 validate(CLAccessor(_target), _reference);
108}
109FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpOutput3DDataset())
110{
111 // Validate output
112 validate(CLAccessor(_target), _reference);
113}
114TEST_SUITE_END() // Output3D
115
116TEST_SUITE(InputOutput3D)
117using CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, true, true>;
118FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpInputOutput3DDataset())
119{
120 // Validate output
121 validate(CLAccessor(_target), _reference);
122}
123FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpInputOutput3DDataset())
124{
125 // Validate output
126 validate(CLAccessor(_target), _reference);
127}
128TEST_SUITE_END() // InputOutput3D
Gian Marco05288a22017-11-21 10:57:50 +0000129TEST_SUITE_END() // MatrixMultiplyCore
130
131TEST_SUITE(OutputStage)
132TEST_SUITE(QuantizeDownInt32ToUint8Scale)
133
134const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
135 3)
136 * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
137
138const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1,
139 2)
140 * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 173) * framework::dataset::make("addBias", { false, true });
141
142using CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8Scale>;
143
Michalis Spyrou80943252019-01-10 17:19:50 +0000144DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases),
Gian Marco05288a22017-11-21 10:57:50 +0000145 shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
146{
147 TensorShape shape_bias(shape[0]);
148
149 // Create tensors
150 CLTensor in = create_tensor<CLTensor>(shape, DataType::S32);
151 CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
152 CLTensor out = create_tensor<CLTensor>(shape, DataType::QASYMM8);
153
154 ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
155 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
156 ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
157
158 // Create and configure function
159 CLGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
160 output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max);
161
162 // Validate valid region input and output
163 const ValidRegion valid_region = shape_to_valid_region(shape);
164 validate(in.info()->valid_region(), valid_region);
165 validate(out.info()->valid_region(), valid_region);
166
167 // Validate valid region bias
168 if(add_bias)
169 {
170 const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
171 validate(bias.info()->valid_region(), valid_region_bias);
172 }
173
174 // Validate padding
Gian Marco Iodice4b908652018-10-18 10:21:02 +0100175 const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
Gian Marco05288a22017-11-21 10:57:50 +0000176 validate(in.info()->padding(), padding);
177 validate(out.info()->padding(), padding);
178
179 if(add_bias)
180 {
181 validate(bias.info()->padding(), padding);
182 }
183}
184
Gian Marco58c57942017-11-28 09:10:03 +0000185FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000186{
187 // Validate output
188 validate(CLAccessor(_target), _reference);
189}
190
Gian Marco58c57942017-11-28 09:10:03 +0000191FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000192{
193 // Validate output
194 validate(CLAccessor(_target), _reference);
195}
196
197TEST_SUITE(BoundedReLu)
Gian Marco58c57942017-11-28 09:10:03 +0000198FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000199{
200 // Validate output
201 validate(CLAccessor(_target), _reference);
202}
203
Gian Marco58c57942017-11-28 09:10:03 +0000204FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
205 quantize_down_int32_to_uint8_scale_relu_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000206{
207 // Validate output
208 validate(CLAccessor(_target), _reference);
209}
210TEST_SUITE_END() // BoundedReLu
Gian Marco05288a22017-11-21 10:57:50 +0000211TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
Gian Marco58c57942017-11-28 09:10:03 +0000212
213TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
214
215const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
216 2)
217 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
218
219const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
220 2)
221 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });
222
223using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
224 GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
225
Michalis Spyrou80943252019-01-10 17:19:50 +0000226DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
Gian Marco58c57942017-11-28 09:10:03 +0000227 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
228 shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
229{
230 TensorShape shape_bias(shape[0]);
231
232 // Create tensors
233 CLTensor in = create_tensor<CLTensor>(shape, DataType::S32);
234 CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
235 CLTensor out = create_tensor<CLTensor>(shape, DataType::QASYMM8);
236
237 ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
238 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
239 ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
240
241 // Create and configure function
242 CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
243 output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
244
245 // Validate valid region input and output
246 const ValidRegion valid_region = shape_to_valid_region(shape);
247 validate(in.info()->valid_region(), valid_region);
248 validate(out.info()->valid_region(), valid_region);
249
250 // Validate valid region bias
251 if(add_bias)
252 {
253 const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
254 validate(bias.info()->valid_region(), valid_region_bias);
255 }
256
257 // Validate padding
Gian Marco Iodice4b908652018-10-18 10:21:02 +0100258 const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
Gian Marco58c57942017-11-28 09:10:03 +0000259 validate(in.info()->padding(), padding);
260 validate(out.info()->padding(), padding);
261
262 if(add_bias)
263 {
264 validate(bias.info()->padding(), padding);
265 }
266}
267
268FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
269 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
270{
271 // Validate output
272 validate(CLAccessor(_target), _reference);
273}
274
275FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
276 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
277{
278 // Validate output
279 validate(CLAccessor(_target), _reference);
280}
281
282TEST_SUITE(BoundedReLu)
283FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
284 quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
285{
286 // Validate output
287 validate(CLAccessor(_target), _reference);
288}
289
290FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
291 quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
292{
293 // Validate output
294 validate(CLAccessor(_target), _reference);
295}
296TEST_SUITE_END() // BoundedReLu
297TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100298TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)
Gian Marco58c57942017-11-28 09:10:03 +0000299
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100300const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
301 2)
302 * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
303
304const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
305 2)
306 * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });
307
308using CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
309 GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
310
311// *INDENT-OFF*
312// clang-format off
313DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
314 framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
315 TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
316 TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Wrong output data type
317 }),
318 framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
319 TensorInfo(TensorShape(21U), 1, DataType::S32),
320 TensorInfo(TensorShape(21U), 1, DataType::S32),
321 })),
322 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
323 TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
324 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
325 })),
326 framework::dataset::make("Min",{ -205,
327 -60000,
328 -180,
329 })),
330 framework::dataset::make("Max",{ 205,
331 60000,
332 180,
333 })),
334 framework::dataset::make("Expected", { true, false, false })),
335 a_info, b_info, output_info, min, max, expected)
336{
337 // Lock tensors
338 Status status = CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(true),
339 &b_info.clone()->set_is_resizable(true),
340 &output_info.clone()->set_is_resizable(true),
341 min,
342 max);
343 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
344}
345// clang-format on
346// *INDENT-ON*
347FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
348 quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
349{
350 // Validate output
351 validate(CLAccessor(_target), _reference);
352}
353TEST_SUITE(BoundedReLu)
354FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
355 quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
356{
357 // Validate output
358 validate(CLAccessor(_target), _reference);
359}
360TEST_SUITE_END() // BoundedReLu
361TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
Gian Marco05288a22017-11-21 10:57:50 +0000362TEST_SUITE_END() // OutputStage
363TEST_SUITE_END() // GEMMLowp
364TEST_SUITE_END() // CL
365} // namespace validation
366} // namespace test
367} // namespace arm_compute