/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpAssemblyFixture.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"
43
44namespace arm_compute
45{
46namespace test
47{
48namespace validation
49{
50namespace
51{
Pablo Tello181e6512017-11-15 13:28:27 +000052const auto data_matrix_multiply = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 16);
Pablo Tello299025a2017-09-29 11:30:12 +010053} // namespace
54
55TEST_SUITE(NEON)
Pablo Tello181e6512017-11-15 13:28:27 +000056TEST_SUITE(ASSEMBLY_MATRIX_MULTIPLY)
Michalis Spyrouf3dfa272017-11-21 17:52:12 +000057
58using NEGEMMAssemblyFixture_S8 = GEMMLowpAssemblyFixture<Tensor, Accessor, NEGEMMLowpAssemblyMatrixMultiplyCore, int8_t>;
59using NEGEMMAssemblyFixture_U8 = GEMMLowpAssemblyFixture<Tensor, Accessor, NEGEMMLowpAssemblyMatrixMultiplyCore, uint8_t>;
60
61TEST_SUITE(S8)
62FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_S8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
Pablo Tello181e6512017-11-15 13:28:27 +000063{
64 // Validate output
65 validate(Accessor(_target), _reference);
66}
67TEST_SUITE_END()
68
Michalis Spyrouf3dfa272017-11-21 17:52:12 +000069TEST_SUITE(U8)
70FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_U8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
71{
72 // Validate output
73 validate(Accessor(_target), _reference);
74}
75TEST_SUITE_END()
76TEST_SUITE_END()
77
Pablo Tello299025a2017-09-29 11:30:12 +010078TEST_SUITE(GEMMLowp)
Gian Marcoe75a02b2017-11-08 12:24:09 +000079TEST_SUITE(MatrixMultiplyCore)
80using NEGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
Gian Marcofa4cacd2017-10-18 17:05:02 +010081
Gian Marcoe75a02b2017-11-08 12:24:09 +000082DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
83 shape_a, shape_b, shape_c, a_offset, b_offset)
Gian Marcofa4cacd2017-10-18 17:05:02 +010084{
85 // Create tensors
Gian Marcoe75a02b2017-11-08 12:24:09 +000086 Tensor a = create_tensor<Tensor>(shape_a, DataType::QASYMM8);
87 Tensor b = create_tensor<Tensor>(shape_b, DataType::QASYMM8);
88 Tensor c = create_tensor<Tensor>(shape_c, DataType::S32);
89
90 a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
91 b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
Gian Marcofa4cacd2017-10-18 17:05:02 +010092
93 ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
94 ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
95 ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
96
97 // Create and configure function
Gian Marcoe75a02b2017-11-08 12:24:09 +000098 NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
Gian Marco Iodice4b908652018-10-18 10:21:02 +010099 gemmlowp_mm.configure(&a, &b, nullptr, &c);
Gian Marcofa4cacd2017-10-18 17:05:02 +0100100}
101
Georgios Pinitasa3b1b462017-11-16 19:24:39 +0000102// *INDENT-OFF*
103// clang-format off
104DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
105 framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100106 TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Mismatching data type
Georgios Pinitasa3b1b462017-11-16 19:24:39 +0000107 TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
108 TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
109 TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
110 }),
111 framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
112 TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
113 TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
114 TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
115 TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
116 })),
117 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
118 TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
119 TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
120 TensorInfo(TensorShape(8U, 11U), 1, DataType::S32),
121 TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
122 })),
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000123 framework::dataset::make("Expected", { false, false, false, false, true })),
Georgios Pinitasa3b1b462017-11-16 19:24:39 +0000124 a_info, b_info, output_info, expected)
125{
126 // Lock tensors
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000127 Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
128 &b_info.clone()->set_is_resizable(false),
Gian Marco Iodice4b908652018-10-18 10:21:02 +0100129 nullptr,
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000130 &output_info.clone()->set_is_resizable(false));
131 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
Georgios Pinitasa3b1b462017-11-16 19:24:39 +0000132}
133// clang-format on
134// *INDENT-ON*
135
Gian Marcoe75a02b2017-11-08 12:24:09 +0000136FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
Pablo Tello299025a2017-09-29 11:30:12 +0100137{
138 // Validate output
Gian Marcofa4cacd2017-10-18 17:05:02 +0100139 validate(Accessor(_target), _reference);
140}
141
Gian Marcoe75a02b2017-11-08 12:24:09 +0000142FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
Gian Marcofa4cacd2017-10-18 17:05:02 +0100143{
144 // Validate output
145 validate(Accessor(_target), _reference);
Pablo Tello299025a2017-09-29 11:30:12 +0100146}
Pablo Tello299025a2017-09-29 11:30:12 +0100147
George Wort2d7e6832019-02-22 16:37:41 +0000148using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
149TEST_SUITE(FusedOffsetOutput)
Manuel Bottini959c26d2019-12-02 16:22:35 +0000150FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputUint8Dataset(),
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000151 framework::dataset::make("DataType", { DataType::QASYMM8 })))
George Wort2d7e6832019-02-22 16:37:41 +0000152{
153 // Validate output
154 validate(Accessor(_target), _reference);
155}
156
Manuel Bottini959c26d2019-12-02 16:22:35 +0000157FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpFusedOffsetOutputUint8Dataset(),
Vidhya Sudhan Loganathan951b8a42019-11-04 14:42:08 +0000158 framework::dataset::make("DataType", { DataType::QASYMM8 })))
George Wort2d7e6832019-02-22 16:37:41 +0000159{
160 // Validate output
161 validate(Accessor(_target), _reference);
162}
163TEST_SUITE_END() // FusedOffsetOutput
Gian Marcoe75a02b2017-11-08 12:24:09 +0000164TEST_SUITE_END() // MatrixMultiplyCore
165
166TEST_SUITE(OutputStage)
167
Luca Foschiani4b869532020-02-13 15:07:36 +0000168TEST_SUITE(QuantizeDownInt32Scale)
169
170TEST_SUITE(QASYMM8)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000171
Gian Marco05288a22017-11-21 10:57:50 +0000172const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
Gian Marco6b77e912017-11-17 09:27:57 +0000173 3)
Giorgio Arena1856ff72020-02-07 13:46:45 +0000174 * framework::dataset::make("min", 0) * framework::dataset::make("max", 255) * framework::dataset::make("addBias", { false, true });
Gian Marco6b77e912017-11-17 09:27:57 +0000175
Gian Marco05288a22017-11-21 10:57:50 +0000176const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1,
Gian Marco6b77e912017-11-17 09:27:57 +0000177 2)
178 * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });
179
Luca Foschiani4b869532020-02-13 15:07:36 +0000180using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>;
Gian Marcoe75a02b2017-11-08 12:24:09 +0000181
Gian Marco7f0f7902017-12-07 09:26:56 +0000182// *INDENT-OFF*
183// clang-format off
184DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
185 framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
Gian Marco7f0f7902017-12-07 09:26:56 +0000186 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
187 }),
188 framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
Gian Marco7f0f7902017-12-07 09:26:56 +0000189 TensorInfo(TensorShape(20U), 1, DataType::S32),
190 })),
191 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
Gian Marco7f0f7902017-12-07 09:26:56 +0000192 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
193 })),
194 framework::dataset::make("Min",{ 0,
Gian Marco7f0f7902017-12-07 09:26:56 +0000195 13,
196 })),
197 framework::dataset::make("Max",{ 205,
Gian Marco7f0f7902017-12-07 09:26:56 +0000198 180,
199 })),
Giorgio Arena1856ff72020-02-07 13:46:45 +0000200 framework::dataset::make("Expected", { true, false })),
Gian Marco7f0f7902017-12-07 09:26:56 +0000201 a_info, b_info, output_info, min, max, expected)
202{
Luca Foschiani4b869532020-02-13 15:07:36 +0000203
204 GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
205 output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
206 output_stage.gemmlowp_min_bound = min;
207 output_stage.gemmlowp_max_bound = max;
208 output_stage.output_data_type = DataType::QASYMM8;
209
Gian Marco7f0f7902017-12-07 09:26:56 +0000210 // Lock tensors
Luca Foschiani4b869532020-02-13 15:07:36 +0000211 Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false),
Gian Marco7f0f7902017-12-07 09:26:56 +0000212 &b_info.clone()->set_is_resizable(false),
213 &output_info.clone()->set_is_resizable(false),
Luca Foschiani4b869532020-02-13 15:07:36 +0000214 output_stage);
Gian Marco7f0f7902017-12-07 09:26:56 +0000215 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
216}
217// clang-format on
218// *INDENT-ON*
219
morgolockf1109542020-09-15 14:33:54 +0100220TEST_CASE(NoPaddingAdded, framework::DatasetMode::PRECOMMIT)
221{
222 Tensor input1 = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::S32);
223 Tensor input2 = create_tensor<Tensor>(TensorShape(21U, 1U), DataType::S32);
224 Tensor output = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::QASYMM8);
225
226 GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
227 output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
228 output_stage.gemmlowp_min_bound = 0;
229 output_stage.gemmlowp_max_bound = 205;
230 output_stage.output_data_type = DataType::QASYMM8;
231
232
233 NEGEMMLowpOutputStage f;
234 f.configure(&input1, &input2, &output, output_stage);
235
236 // Validate padding is zero
237 validate(input1.info()->padding(), PaddingSize());
238 validate(input2.info()->padding(), PaddingSize());
239 validate(output.info()->padding(), PaddingSize());
240}
241
242
Luca Foschiani4b869532020-02-13 15:07:36 +0000243FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
Gian Marcoe75a02b2017-11-08 12:24:09 +0000244{
245 // Validate output
246 validate(Accessor(_target), _reference);
247}
248
Gian Marco6b77e912017-11-17 09:27:57 +0000249TEST_SUITE(BoundedReLu)
Luca Foschiani4b869532020-02-13 15:07:36 +0000250FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
Gian Marco6b77e912017-11-17 09:27:57 +0000251{
252 // Validate output
253 validate(Accessor(_target), _reference);
254}
Gian Marcoe75a02b2017-11-08 12:24:09 +0000255
Gian Marco6b77e912017-11-17 09:27:57 +0000256TEST_SUITE_END() // BoundedReLu
257
Luca Foschiani4b869532020-02-13 15:07:36 +0000258TEST_SUITE_END() // QASYMM8
259
260TEST_SUITE(QASYMM8_SIGNED)
261
262const auto quantize_down_int32_to_int8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
263 3)
264 * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
265
266const auto quantize_down_int32_to_int8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1,
267 2)
268 * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", -100, -98) * framework::dataset::make("max", 71, 74) * framework::dataset::make("addBias", { false, true });
269
270using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>;
271
272// *INDENT-OFF*
273// clang-format off
274DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
275 framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
276 TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
277 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
278 }),
279 framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
280 TensorInfo(TensorShape(21U), 1, DataType::S32),
281 TensorInfo(TensorShape(20U), 1, DataType::S32),
282 })),
283 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
284 TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
285 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
286 })),
287 framework::dataset::make("Min",{ -10,
288 -200,
289 -113,
290 })),
291 framework::dataset::make("Max",{ 105,
292 300,
293 -18,
294 })),
295 framework::dataset::make("Expected", { true, false, false })),
296 a_info, b_info, output_info, min, max, expected)
297{
298 GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
299 output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
300 output_stage.gemmlowp_min_bound = min;
301 output_stage.gemmlowp_max_bound = max;
302 output_stage.output_data_type = DataType::QASYMM8_SIGNED;
303
304 // Lock tensors
305 Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false),
306 &b_info.clone()->set_is_resizable(false),
307 &output_info.clone()->set_is_resizable(false),
308 output_stage);
309 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
310}
311// clang-format on
312// *INDENT-ON*
313
314FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_cases))
315{
316 // Validate output
317 validate(Accessor(_target), _reference);
318}
319
320TEST_SUITE(BoundedReLu)
321FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_relu_cases))
322{
323 // Validate output
324 validate(Accessor(_target), _reference);
325}
326
327TEST_SUITE_END() // BoundedReLu
328
329TEST_SUITE_END() // QASYMM8_SIGNED
330
331TEST_SUITE_END() // QuantizeDownInt32Scale
Gian Marco58c57942017-11-28 09:10:03 +0000332
333TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
334
335const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
336 2)
Giorgio Arena1856ff72020-02-07 13:46:45 +0000337 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 255) * framework::dataset::make("addBias", { false, true });
Gian Marco58c57942017-11-28 09:10:03 +0000338
339const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
340 2)
341 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });
342
343using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
344 GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
345
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100346using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
347 GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
348
Gian Marco7f0f7902017-12-07 09:26:56 +0000349// *INDENT-OFF*
350// clang-format off
351DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
352 framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
Gian Marco7f0f7902017-12-07 09:26:56 +0000353 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
354 }),
355 framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
Gian Marco7f0f7902017-12-07 09:26:56 +0000356 TensorInfo(TensorShape(20U), 1, DataType::S32),
357 })),
358 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
Gian Marco7f0f7902017-12-07 09:26:56 +0000359 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
360 })),
361 framework::dataset::make("Min",{ 0,
Gian Marco7f0f7902017-12-07 09:26:56 +0000362 13,
363 })),
364 framework::dataset::make("Max",{ 205,
Gian Marco7f0f7902017-12-07 09:26:56 +0000365 180,
366 })),
Giorgio Arena1856ff72020-02-07 13:46:45 +0000367 framework::dataset::make("Expected", { true, false })),
Gian Marco7f0f7902017-12-07 09:26:56 +0000368 a_info, b_info, output_info, min, max, expected)
369{
370 // Lock tensors
371 Status status = NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
372 &b_info.clone()->set_is_resizable(false),
373 &output_info.clone()->set_is_resizable(false),
374 min,
375 max);
376 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
377}
378// clang-format on
379// *INDENT-ON*
380
Michalis Spyrou5c9f0c42019-01-16 14:48:48 +0000381DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
Gian Marco58c57942017-11-28 09:10:03 +0000382 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
383 shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
384{
385 TensorShape shape_bias(shape[0]);
386
387 // Create tensors
388 Tensor in = create_tensor<Tensor>(shape, DataType::S32);
389 Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
390 Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8);
391
392 ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
393 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
394 ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
395
396 // Create and configure function
397 NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
398 output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
399
400 // Validate valid region input and output
401 const ValidRegion valid_region = shape_to_valid_region(shape);
402 validate(in.info()->valid_region(), valid_region);
403 validate(out.info()->valid_region(), valid_region);
404
405 // Validate valid region bias
406 if(add_bias)
407 {
408 const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
409 validate(bias.info()->valid_region(), valid_region_bias);
410 }
411
412 // Validate padding
Gian Marco7f0f7902017-12-07 09:26:56 +0000413 const PaddingSize padding(0);
Gian Marco58c57942017-11-28 09:10:03 +0000414 validate(in.info()->padding(), padding);
415 validate(out.info()->padding(), padding);
416
417 if(add_bias)
418 {
419 validate(bias.info()->padding(), padding);
420 }
421}
422
423FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
424 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
425{
426 // Validate output
427 validate(Accessor(_target), _reference);
428}
429
430FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
431 quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
432{
433 // Validate output
434 validate(Accessor(_target), _reference);
435}
436
437TEST_SUITE(BoundedReLu)
438FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
439 quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
440{
441 // Validate output
442 validate(Accessor(_target), _reference);
443}
444
445FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
446 quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
447{
448 // Validate output
449 validate(Accessor(_target), _reference);
450}
451TEST_SUITE_END() // BoundedReLu
452
453TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100454
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000455TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)
456
457const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
458 2)
Giorgio Arena1856ff72020-02-07 13:46:45 +0000459 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -128) * framework::dataset::make("max", 128) * framework::dataset::make("addBias", { false, true });
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000460
461const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
462 2)
463 * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });
464
465using NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
466 GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;
467
468// *INDENT-OFF*
469// clang-format off
470DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
471 framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::F32), // Invalid input data type
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000472 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
473 TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
474 }),
475 framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000476 TensorInfo(TensorShape(20U), 1, DataType::S32),
477 TensorInfo(TensorShape(21U), 1, DataType::S32),
478 })),
479 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000480 TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
481 TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
482 })),
483 framework::dataset::make("Min",{ -110,
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000484 -113,
485 -113,
486 })),
487 framework::dataset::make("Max",{ 87,
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000488 97,
489 97,
490 })),
Giorgio Arena1856ff72020-02-07 13:46:45 +0000491 framework::dataset::make("Expected", { false, false, true })),
Georgios Pinitas448a81f2019-11-21 14:10:25 +0000492 a_info, b_info, output_info, min, max, expected)
493{
494 // Lock tensors
495 Status status = NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
496 &b_info.clone()->set_is_resizable(false),
497 &output_info.clone()->set_is_resizable(false),
498 min,
499 max);
500 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
501}
502// clang-format on
503// *INDENT-ON*
504
505DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
506 quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
507 shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
508{
509 TensorShape shape_bias(shape[0]);
510
511 // Create tensors
512 Tensor in = create_tensor<Tensor>(shape, DataType::S32);
513 Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
514 Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8_SIGNED);
515
516 ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
517 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
518 ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
519
520 // Create and configure function
521 NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
522 output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
523
524 // Validate valid region input and output
525 const ValidRegion valid_region = shape_to_valid_region(shape);
526 validate(in.info()->valid_region(), valid_region);
527 validate(out.info()->valid_region(), valid_region);
528
529 // Validate valid region bias
530 if(add_bias)
531 {
532 const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
533 validate(bias.info()->valid_region(), valid_region_bias);
534 }
535
536 // Validate padding
537 const PaddingSize padding(0);
538 validate(in.info()->padding(), padding);
539 validate(out.info()->padding(), padding);
540
541 if(add_bias)
542 {
543 validate(bias.info()->padding(), padding);
544 }
545}
546FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
547 quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
548{
549 // Validate output
550 validate(Accessor(_target), _reference);
551}
552
553TEST_SUITE(BoundedReLu)
554FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
555 quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
556{
557 // Validate output
558 validate(Accessor(_target), _reference);
559}
560TEST_SUITE_END() // BoundedReLu
561TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint
562
TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)

// Quantization parameters for the S32 -> QSYMM16 fixed-point output stage.
// Positive result_shift => effective multiplier <= 1; min/max span the full
// signed 16-bit range, i.e. no activation clamping.
const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
                                                                    2)
                                                                    * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767) * framework::dataset::make("addBias", { false, true });

// As above, but with a tight [min, max] window to exercise the bounded-ReLU clamp.
const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
                                                                         2)
                                                                         * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

// Negative result_shift => effective multiplier greater than 1; no clamping.
const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases = framework::dataset::make("result_fixedpoint_multiplier", 1073741823,
                                                                               1073741825)
                                                                               * framework::dataset::make("result_shift", -3,
                                                                                                          -2)
                                                                               * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767) * framework::dataset::make("addBias", { false, true });

// Multiplier greater than 1 combined with the bounded-ReLU clamp window.
const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600,
                                                                                    254601602)
                                                                                    * framework::dataset::make("result_shift", -3,
                                                                                                               -1)
                                                                                    * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

// Fixture that runs the NEON int16 fixed-point output stage and computes the reference.
using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
586
// *INDENT-OFF*
// clang-format off
// Static checks of NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate():
// the first configuration is expected to be accepted, the second rejected
// (output is S32 instead of QSYMM16).
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Min",{ -205,
                                     -180,
                                   })),
    framework::dataset::make("Max",{ 205,
                                     180,
                                   })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Lock tensors (validate() is exercised on non-resizable clones, as at runtime)
    Status status = NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                  &b_info.clone()->set_is_resizable(false),
                                                                                  &output_info.clone()->set_is_resizable(false),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
618
619DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
620 quantize_down_int32_to_int16_scale_by_fixedpoint_cases),
621 shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias)
622{
623 TensorShape shape_bias(shape[0]);
624
625 // Create tensors
626 Tensor in = create_tensor<Tensor>(shape, DataType::S32);
627 Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
628 Tensor out = create_tensor<Tensor>(shape, DataType::QSYMM16);
629
630 ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
631 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
632 ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
633
634 // Create and configure function
635 NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
636 output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, min, max);
637
638 // Validate valid region input and output
639 const ValidRegion valid_region = shape_to_valid_region(shape);
640 validate(in.info()->valid_region(), valid_region);
641 validate(out.info()->valid_region(), valid_region);
642
643 // Validate valid region bias
644 if(add_bias)
645 {
646 const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
647 validate(bias.info()->valid_region(), valid_region_bias);
648 }
649
650 // Validate padding
651 const PaddingSize padding(0);
652 validate(in.info()->padding(), padding);
653 validate(out.info()->padding(), padding);
654
655 if(add_bias)
656 {
657 validate(bias.info()->padding(), padding);
658 }
659}
TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
// Effective multiplier <= 1 (positive result_shift), no activation clamp.
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
// Effective multiplier > 1 (negative result_shift), no activation clamp.
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // NoRelu
// Bounded-ReLU variants: same multiplier regimes, output clamped to the
// narrow [min, max] window from the *_relu_cases datasets.
TEST_SUITE(BoundedReLu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
Gian Marcoe75a02b2017-11-08 12:24:09 +0000697TEST_SUITE_END() // OutputStage
Gian Marcoe75a02b2017-11-08 12:24:09 +0000698TEST_SUITE_END() // GEMMLowp
699TEST_SUITE_END() // NEON
Pablo Tello299025a2017-09-29 11:30:12 +0100700} // namespace validation
701} // namespace test
702} // namespace arm_compute