/*
 * Copyright (c) 2023-2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEMatMul.h"

#include "tests/datasets/LargeMatMulDataset.h"
#include "tests/datasets/SmallMatMulDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/framework/Macros.h"
#include "tests/NEON/Accessor.h"
#include "tests/validation/fixtures/MatMulFixture.h"
#include "tests/validation/Validation.h"
36namespace arm_compute
37{
38namespace test
39{
40namespace validation
41{
Viet-Hoa Doc85edf12023-09-01 16:48:17 +010042using framework::dataset::make;
43
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000044TEST_SUITE(NEON)
45TEST_SUITE(MatMul)
46
Renato Arantes36a75da2024-01-26 17:31:18 +000047constexpr AbsoluteTolerance<float> tolerance_fp32(
48 0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
49const AbsoluteTolerance<half> tolerance_fp16(half(0.1f));
Ramy Elgammalaf150762023-04-25 17:19:27 +010050#ifdef __aarch64__
Gunes Bayir43ba0dd2024-03-19 15:31:11 +000051constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8(1);
52constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8_signed(1);
Ramy Elgammalaf150762023-04-25 17:19:27 +010053#endif // __aarch64__
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000054
55// clang-format off
56// *INDENT-OFF*
57// Validation Tests
Viet-Hoa Doc85edf12023-09-01 16:48:17 +010058DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
59 zip(
60 make("InputAInfo", {
61 TensorInfo(TensorShape(9U, 6U), 1, DataType::F32), // Mismatching datatype
62 TensorInfo(TensorShape(9U, 6U), 1, DataType::S32), // Unsupported datatypes
63 TensorInfo(TensorShape(9U, 6U, 2U), 1, DataType::F32), // Broadcasting in batch dimension not supported
64 TensorInfo(TensorShape(9U, 6U), 1, DataType::F32), // Invalid shape for multiplication
65 TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),
66 TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32),
67 TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32), // Tensors are not dynamic
68 TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8),
69 TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED),
70 TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED), // Mismatching data type
71 }),
72 make("InputBInfo", {
73 TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
74 TensorInfo(TensorShape(5U, 9U), 1, DataType::S32),
75 TensorInfo(TensorShape(5U, 9U, 1U), 1, DataType::F32),
76 TensorInfo(TensorShape(5U, 12U), 1, DataType::F32),
77 TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
78 TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
79 TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
80 TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
81 TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
82 TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
83 }),
84 make("OutputInfo", {
85 TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
86 TensorInfo(TensorShape(5U, 6U), 1, DataType::S32),
87 TensorInfo(TensorShape(5U, 6U, 2U), 1, DataType::F32),
88 TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
89 TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
90 TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
91 TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
92 TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
93 TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8_SIGNED),
94 TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
95 }),
96 make("TensorIsConst", {false, false, false, false, false , false, true, false, false, false}),
97 make("Expected", { false, false, false, false, true, true, false, true, true, false })),
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000098 a_info, b_info, output_info, are_tensors_const, expected)
99{
100 TensorInfo a{a_info};
101 TensorInfo b{b_info};
102 a.set_are_values_constant(are_tensors_const);
103 b.set_are_values_constant(are_tensors_const);
104 Status status = NEMatMul::validate(&a,
105 &b,
106 &output_info,
107 MatMulInfo(),
108 CpuMatMulSettings());
109 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
110}
111// *INDENT-ON*
112// clang-format on
113
114// Generic Template
115template <typename T>
116using NEMatMulFixture = MatMulValidationWithActivationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
117
118// Fast math Template
119template <typename T>
120using NEMatMulFastMathFixture = MatMulGenericValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
121
122template <typename T>
Renato Arantes36a75da2024-01-26 17:31:18 +0000123using NEMatMulFixedFormatFixture = MatMulFixedFormatFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
124
125template <typename T>
126using NEMatMulDynamicTensorsFixture =
127 MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000128
Viet-Hoa Do9c7c2d22023-04-11 17:16:27 +0100129template <typename T>
130using NEQuantizedMatMulFixture = QuantizedMatMulValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
131
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000132TEST_SUITE(Float)
133TEST_SUITE(FP32)
Renato Arantes36a75da2024-01-26 17:31:18 +0000134FIXTURE_DATA_TEST_CASE(RunSmall,
135 NEMatMulFixture<float>,
136 framework::DatasetMode::PRECOMMIT,
137 combine(datasets::SmallMatMulDataset(),
138 make("TransposeA", {false, true}),
139 make("TransposeB", {false, true}),
140 make("DataType", DataType::F32),
141 make("ActivationInfo",
142{
143 ActivationLayerInfo(),
144 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
145})))
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000146{
147 // Validate output
148 validate(Accessor(_target), _reference, tolerance_fp32);
149}
Renato Arantes36a75da2024-01-26 17:31:18 +0000150FIXTURE_DATA_TEST_CASE(RunLarge,
151 NEMatMulFixture<float>,
152 framework::DatasetMode::NIGHTLY,
153 combine(datasets::LargeMatMulDataset(),
154 make("TransposeA", {false, true}),
155 make("TransposeB", {false, true}),
156 make("DataType", DataType::F32),
157 make("ActivationInfo",
158{
159 ActivationLayerInfo(),
160 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
161})))
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000162{
163 // Validate output
164 validate(Accessor(_target), _reference, tolerance_fp32);
165}
Renato Arantes36a75da2024-01-26 17:31:18 +0000166FIXTURE_DATA_TEST_CASE(RunHighDimensions,
167 NEMatMulFixture<float>,
168 framework::DatasetMode::NIGHTLY,
169 combine(datasets::HighDimensionalMatMulDataset(),
170 make("TransposeA", {false, true}),
171 make("TransposeB", {false, true}),
172 make("DataType", DataType::F32),
173 make("ActivationInfo",
174{
175 ActivationLayerInfo(),
176 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
177})))
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000178{
179 // Validate output
180 validate(Accessor(_target), _reference, tolerance_fp32);
181}
182
Renato Arantes36a75da2024-01-26 17:31:18 +0000183FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
184 NEMatMulDynamicTensorsFixture<float>,
185 framework::DatasetMode::PRECOMMIT,
186 combine(datasets::SmallMatMulDataset(),
187 make("TransposeA", {false, true}),
188 make("TransposeB", {false, true}),
189 make("DataType", DataType::F32),
190 make("ActivationInfo",
191{
192 ActivationLayerInfo(),
193 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
194}),
195make("NumberOfRuns", 5)))
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000196{
197 // Validate output
198 validate(Accessor(_target), _reference, tolerance_fp32);
199}
200TEST_SUITE_END() // FP32
201
#ifdef ARM_COMPUTE_ENABLE_BF16
/* Note : MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting */
constexpr AbsoluteTolerance<float> tolerance_bf16(0.02f);
TEST_SUITE(BF16)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEMatMulFastMathFixture<float>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo", {ActivationLayerInfo()}),
                               make("RunTimes", {0}),
                               make("Settings", {CpuMatMulSettings().fast_math(true)}),
                               make("LhsQInfo", {QuantizationInfo()}),
                               make("RhsQInfo", {QuantizationInfo()}),
                               make("OutQInfo", {QuantizationInfo()})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_bf16);
}

FIXTURE_DATA_TEST_CASE(RunTinyFixedFormat,
                       NEMatMulFixedFormatFixture<bfloat16>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::TinyMatMulDataset(),
                               make("TransposeA", {false}),
                               make("TransposeB", {false}),
                               make("DataType", DataType::BFLOAT16),
                               make("ActivationInfo", {ActivationLayerInfo()}),
                               make("RunTimes", {0}),
                               make("Settings", {CpuMatMulSettings().fast_math(true).fixed_format(true)}),
                               make("LhsQInfo", {QuantizationInfo()}),
                               make("RhsQInfo", {QuantizationInfo()}),
                               make("OutQInfo", {QuantizationInfo()})))
{
    // Only validate on hardware with native bf16 support; otherwise the run is a no-op.
    if (CPUInfo::get().has_bf16())
    {
        // Validate output
        validate(Accessor(_target), _reference, tolerance_bf16);
    }
}

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEMatMulFastMathFixture<float>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo", {ActivationLayerInfo()}),
                               make("RunTimes", {0}),
                               make("Settings", {CpuMatMulSettings().fast_math(true)}),
                               make("LhsQInfo", {QuantizationInfo()}),
                               make("RhsQInfo", {QuantizationInfo()}),
                               make("OutQInfo", {QuantizationInfo()})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_bf16, 0.01 /* tolerance_num */);
}
TEST_SUITE_END() // BF16
#endif /* ARM_COMPUTE_ENABLE_BF16 */
264
265#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
266TEST_SUITE(FP16)
Renato Arantes36a75da2024-01-26 17:31:18 +0000267FIXTURE_DATA_TEST_CASE(RunSmall,
268 NEMatMulFixture<half>,
269 framework::DatasetMode::PRECOMMIT,
270 combine(datasets::SmallMatMulDataset(),
271 make("TransposeA", {false, true}),
272 make("TransposeB", {false, true}),
273 make("DataType", DataType::F16),
274 make("ActivationInfo",
275{
276 ActivationLayerInfo(),
277 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
278})))
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000279{
280 // Validate output
281 validate(Accessor(_target), _reference, tolerance_fp16);
282}
Renato Arantes36a75da2024-01-26 17:31:18 +0000283FIXTURE_DATA_TEST_CASE(RunLarge,
284 NEMatMulFixture<half>,
285 framework::DatasetMode::NIGHTLY,
286 combine(datasets::LargeMatMulDataset(),
287 make("TransposeA", {false, true}),
288 make("TransposeB", {false, true}),
289 make("DataType", DataType::F16),
290 make("ActivationInfo",
291{
292 ActivationLayerInfo(),
293 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
294})))
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000295{
296 // Validate output
297 validate(Accessor(_target), _reference, tolerance_fp16);
298}
Renato Arantes36a75da2024-01-26 17:31:18 +0000299FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
300 NEMatMulDynamicTensorsFixture<half>,
301 framework::DatasetMode::PRECOMMIT,
302 combine(datasets::SmallMatMulDataset(),
303 make("TransposeA", {false, true}),
304 make("TransposeB", {false, true}),
305 make("DataType", DataType::F16),
306 make("ActivationInfo",
307{
308 ActivationLayerInfo(),
309 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
310}),
311make("NumberOfRuns", 5)))
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000312{
313 // Validate output
314 validate(Accessor(_target), _reference, tolerance_fp16);
315}
316TEST_SUITE_END() // FP16
317#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
318
319TEST_SUITE_END() // Float
320
#ifdef __aarch64__ // All the GeMM CPU assembly kernels for integer datatypes require aarch64
TEST_SUITE(Quantized)

TEST_SUITE(QASYMM8)

FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEQuantizedMatMulFixture<uint8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo",
                                    {
                                        ActivationLayerInfo(),
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
                                    }),
                               make("NumberOfExtraRuns", {0, 1}),
                               make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
                               make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
                               make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

// Covers the bounded activations not exercised by RunSmall.
FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
                       NEQuantizedMatMulFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::SmallerMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo",
                                    {
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
                                    }),
                               make("NumberOfExtraRuns", {0, 1}),
                               make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
                               make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
                               make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEQuantizedMatMulFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo",
                                    {
                                        ActivationLayerInfo(),
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
                                    }),
                               make("NumberOfExtraRuns", {0, 1}),
                               make("LhsQInfo", {QuantizationInfo(1.f / 100, 1)}),
                               make("RhsQInfo", {QuantizationInfo(1.f / 200, -1)}),
                               make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)

FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEQuantizedMatMulFixture<int8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               make("ActivationInfo",
                                    {
                                        ActivationLayerInfo(),
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
                                    }),
                               make("NumberOfExtraRuns", {0, 1}),
                               make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
                               make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
                               make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}

// Covers the bounded activations not exercised by RunSmall.
FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
                       NEQuantizedMatMulFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::SmallerMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               make("ActivationInfo",
                                    {
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
                                    }),
                               make("NumberOfExtraRuns", {0, 1}),
                               make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
                               make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
                               make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEQuantizedMatMulFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               make("ActivationInfo",
                                    {
                                        ActivationLayerInfo(),
                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
                                    }),
                               make("NumberOfExtraRuns", {0, 1}),
                               make("LhsQInfo", {QuantizationInfo(1.f / 150, -2)}),
                               make("RhsQInfo", {QuantizationInfo(1.f / 250, 1)}),
                               make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}

TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // Quantized
#endif // __aarch64__
Viet-Hoa Do9c7c2d22023-04-11 17:16:27 +0100460
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000461TEST_SUITE_END() // MatMul
462TEST_SUITE_END() // NEON
463} // namespace validation
464} // namespace test
465} // namespace arm_compute