/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/CL/CLTensor.h"
26#include "arm_compute/runtime/CL/CLTensorAllocator.h"
27#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
28#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
29#include "tests/CL/CLAccessor.h"
30#include "tests/PaddingCalculator.h"
31#include "tests/datasets/LargeGEMMLowpDataset.h"
32#include "tests/datasets/ShapeDatasets.h"
33#include "tests/datasets/SmallGEMMLowpDataset.h"
34#include "tests/framework/Asserts.h"
35#include "tests/framework/Macros.h"
36#include "tests/framework/datasets/Datasets.h"
37#include "tests/validation/Validation.h"
38#include "tests/validation/fixtures/GEMMLowpFixture.h"
39
40namespace arm_compute
41{
42namespace test
43{
44namespace validation
45{
46TEST_SUITE(CL)
47TEST_SUITE(GEMMLowp)
48
49TEST_SUITE(MatrixMultiplyCore)
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +010050
Gian Marco05288a22017-11-21 10:57:50 +000051using CLGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
52
53DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
54 shape_a, shape_b, shape_c, a_offset, b_offset)
55{
56 // Create tensors
57 CLTensor a = create_tensor<CLTensor>(shape_a, DataType::QASYMM8);
58 CLTensor b = create_tensor<CLTensor>(shape_b, DataType::QASYMM8);
59 CLTensor c = create_tensor<CLTensor>(shape_c, DataType::S32);
60
61 a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
62 b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
63
64 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
65 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
66 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
67
68 // Create and configure function
69 CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
70 gemmlowp_mm.configure(&a, &b, &c);
71}
72
73FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
74{
75 // Validate output
76 validate(CLAccessor(_target), _reference);
77}
78
79FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
80{
81 // Validate output
82 validate(CLAccessor(_target), _reference);
83}
84
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +010085TEST_SUITE(Output3D)
86using CLGEMMLowpMatrixMultiplyCoreOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, true>;
87FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpOutput3DDataset())
88{
89 // Validate output
90 validate(CLAccessor(_target), _reference);
91}
92FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpOutput3DDataset())
93{
94 // Validate output
95 validate(CLAccessor(_target), _reference);
96}
97TEST_SUITE_END() // Output3D
98
99TEST_SUITE(InputOutput3D)
100using CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, true, true>;
101FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpInputOutput3DDataset())
102{
103 // Validate output
104 validate(CLAccessor(_target), _reference);
105}
106FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpInputOutput3DDataset())
107{
108 // Validate output
109 validate(CLAccessor(_target), _reference);
110}
111TEST_SUITE_END() // InputOutput3D
Gian Marco05288a22017-11-21 10:57:50 +0000112TEST_SUITE_END() // MatrixMultiplyCore
113
114TEST_SUITE(OutputStage)
115TEST_SUITE(QuantizeDownInt32ToUint8Scale)
116
117const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
118 3)
119 * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
120
121const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1,
122 2)
123 * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 173) * framework::dataset::make("addBias", { false, true });
124
125using CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8Scale>;
126
127DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), quantize_down_int32_to_uint8_scale_cases),
128 shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
129{
130 TensorShape shape_bias(shape[0]);
131
132 // Create tensors
133 CLTensor in = create_tensor<CLTensor>(shape, DataType::S32);
134 CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
135 CLTensor out = create_tensor<CLTensor>(shape, DataType::QASYMM8);
136
137 ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
138 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
139 ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
140
141 // Create and configure function
142 CLGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
143 output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max);
144
145 // Validate valid region input and output
146 const ValidRegion valid_region = shape_to_valid_region(shape);
147 validate(in.info()->valid_region(), valid_region);
148 validate(out.info()->valid_region(), valid_region);
149
150 // Validate valid region bias
151 if(add_bias)
152 {
153 const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
154 validate(bias.info()->valid_region(), valid_region_bias);
155 }
156
157 // Validate padding
158 const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
159 validate(in.info()->padding(), padding);
160 validate(out.info()->padding(), padding);
161
162 if(add_bias)
163 {
164 validate(bias.info()->padding(), padding);
165 }
166}
167
Gian Marco58c57942017-11-28 09:10:03 +0000168FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000169{
170 // Validate output
171 validate(CLAccessor(_target), _reference);
172}
173
Gian Marco58c57942017-11-28 09:10:03 +0000174FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000175{
176 // Validate output
177 validate(CLAccessor(_target), _reference);
178}
179
180TEST_SUITE(BoundedReLu)
Gian Marco58c57942017-11-28 09:10:03 +0000181FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000182{
183 // Validate output
184 validate(CLAccessor(_target), _reference);
185}
186
Gian Marco58c57942017-11-28 09:10:03 +0000187FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
188 quantize_down_int32_to_uint8_scale_relu_cases))
Gian Marco05288a22017-11-21 10:57:50 +0000189{
190 // Validate output
191 validate(CLAccessor(_target), _reference);
192}
193TEST_SUITE_END() // BoundedReLu
Gian Marco05288a22017-11-21 10:57:50 +0000194TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
Gian Marco58c57942017-11-28 09:10:03 +0000195
196TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
197
198const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
199 2)
200 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
201
202const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
203 2)
204 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });
205
206using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
207 GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
208
209DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()),
210 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
211 shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
212{
213 TensorShape shape_bias(shape[0]);
214
215 // Create tensors
216 CLTensor in = create_tensor<CLTensor>(shape, DataType::S32);
217 CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
218 CLTensor out = create_tensor<CLTensor>(shape, DataType::QASYMM8);
219
220 ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
221 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
222 ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
223
224 // Create and configure function
225 CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
226 output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
227
228 // Validate valid region input and output
229 const ValidRegion valid_region = shape_to_valid_region(shape);
230 validate(in.info()->valid_region(), valid_region);
231 validate(out.info()->valid_region(), valid_region);
232
233 // Validate valid region bias
234 if(add_bias)
235 {
236 const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
237 validate(bias.info()->valid_region(), valid_region_bias);
238 }
239
240 // Validate padding
241 const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
242 validate(in.info()->padding(), padding);
243 validate(out.info()->padding(), padding);
244
245 if(add_bias)
246 {
247 validate(bias.info()->padding(), padding);
248 }
249}
250
251FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
252 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
253{
254 // Validate output
255 validate(CLAccessor(_target), _reference);
256}
257
258FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
259 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
260{
261 // Validate output
262 validate(CLAccessor(_target), _reference);
263}
264
265TEST_SUITE(BoundedReLu)
266FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
267 quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
268{
269 // Validate output
270 validate(CLAccessor(_target), _reference);
271}
272
273FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
274 quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
275{
276 // Validate output
277 validate(CLAccessor(_target), _reference);
278}
279TEST_SUITE_END() // BoundedReLu
280TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
281
Gian Marco05288a22017-11-21 10:57:50 +0000282TEST_SUITE_END() // OutputStage
283TEST_SUITE_END() // GEMMLowp
284TEST_SUITE_END() // CL
285} // namespace validation
286} // namespace test
287} // namespace arm_compute