blob: f22bd9e86a4419bd4aff5c2dd92e75763685c5df [file] [log] [blame]
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +00001/*
Gunes Bayir2aec5f12024-01-23 17:19:44 +00002 * Copyright (c) 2023-2024 Arm Limited.
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/Types.h"
25#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
26
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000027#include "tests/datasets/LargeMatMulDataset.h"
28#include "tests/datasets/SmallMatMulDataset.h"
Renato Arantes36a75da2024-01-26 17:31:18 +000029#include "tests/framework/Asserts.h"
30#include "tests/framework/datasets/Datasets.h"
31#include "tests/framework/Macros.h"
32#include "tests/NEON/Accessor.h"
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000033#include "tests/validation/fixtures/MatMulFixture.h"
Renato Arantes36a75da2024-01-26 17:31:18 +000034#include "tests/validation/Validation.h"
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000035
36namespace arm_compute
37{
38namespace test
39{
40namespace validation
41{
Viet-Hoa Doc85edf12023-09-01 16:48:17 +010042using framework::dataset::make;
43
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000044TEST_SUITE(NEON)
45TEST_SUITE(MatMul)
46
Renato Arantes36a75da2024-01-26 17:31:18 +000047constexpr AbsoluteTolerance<float> tolerance_fp32(
48 0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
49const AbsoluteTolerance<half> tolerance_fp16(half(0.1f));
Ramy Elgammalaf150762023-04-25 17:19:27 +010050#ifdef __aarch64__
Gunes Bayir43ba0dd2024-03-19 15:31:11 +000051constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8(1);
52constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8_signed(1);
Ramy Elgammalaf150762023-04-25 17:19:27 +010053#endif // __aarch64__
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +000054
// clang-format off
// *INDENT-OFF*
// Validation Tests
//
// The four zipped datasets below are read positionally: entry i of InputAInfo,
// InputBInfo, OutputInfo and TensorIsConst together form one case, and entry i
// of "Expected" is the boolean validate() is expected to return for it.
// Cases expected to fail carry a trailing comment naming the reason.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
    zip(
        make("InputAInfo", {
            TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),      // Mismatching datatype
            TensorInfo(TensorShape(9U, 6U), 1, DataType::S32),      // Unsupported datatypes
            TensorInfo(TensorShape(9U, 6U, 2U), 1, DataType::F32),  // Broadcasting in batch dimension not supported
            TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),      // Invalid shape for multiplication
            TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),
            TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32),
            TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32), // Tensors are not dynamic
            TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8),
            TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED),
            TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED), // Mismatching data type
        }),
        make("InputBInfo", {
            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
            TensorInfo(TensorShape(5U, 9U), 1, DataType::S32),
            TensorInfo(TensorShape(5U, 9U, 1U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 12U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
        }),
        make("OutputInfo", {
            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 6U), 1, DataType::S32),
            TensorInfo(TensorShape(5U, 6U, 2U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
            TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
            TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8_SIGNED),
            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
        }),
        make("TensorIsConst", {false, false, false, false, false , false, true, false, false, false}),
        make("Expected", { false, false, false, false, true, true, false, true, true, false })),
    a_info, b_info, output_info, are_tensors_const, expected)
{
    // Copy the infos so the constness flag can be set per-case without
    // mutating the shared dataset entries.
    TensorInfo a{a_info};
    TensorInfo b{b_info};
    a.set_are_values_constant(are_tensors_const);
    b.set_are_values_constant(are_tensors_const);
    // Run static validation only (no tensors allocated, no kernel executed).
    Status status = NEMatMul::validate(&a,
                                       &b,
                                       &output_info,
                                       MatMulInfo(),
                                       CpuMatMulSettings());
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// *INDENT-ON*
// clang-format on
113
// Generic Template: float/half validation with an activation applied after the matmul
template <typename T>
using NEMatMulFixture = MatMulValidationWithActivationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

// Fast math Template: generic fixture so per-case Settings (e.g. fast_math) can be supplied
template <typename T>
using NEMatMulFastMathFixture = MatMulGenericValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

// Fixed-format weights fixture (used by the BF16 fixed-format case below)
template <typename T>
using NEMatMulFixedFormatFixture = MatMulFixedFormatFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

// Dynamic-tensor fixture: re-runs the function multiple times with non-constant tensors
template <typename T>
using NEMatMulDynamicTensorsFixture =
    MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

// Quantized fixture: adds per-case LHS/RHS/output quantization infos
template <typename T>
using NEQuantizedMatMulFixture = QuantizedMatMulValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
131
TEST_SUITE(Float)
TEST_SUITE(FP32)
// Small shapes, all four transpose combinations, with and without RELU.
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEMatMulFixture<float>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
// Large shapes; nightly only.
FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEMatMulFixture<float>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
// Shapes with more than the usual number of dimensions; nightly only.
FIXTURE_DATA_TEST_CASE(RunHighDimensions,
                       NEMatMulFixture<float>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::HighDimensionalMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}

// Non-constant tensors, executed NumberOfRuns times per case to stress
// repeated runs with dynamic inputs.
FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
                       NEMatMulDynamicTensorsFixture<float>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
}),
make("NumberOfRuns", 5)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
TEST_SUITE_END() // FP32
201
#ifdef ARM_COMPUTE_ENABLE_BF16
/* Note : MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting */
constexpr AbsoluteTolerance<float> tolerance_bf16(0.02f);
TEST_SUITE(BF16)
// FP32 data with fast_math(true) so the BF16 fast-math path is exercised.
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEMatMulFastMathFixture<float>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo", {ActivationLayerInfo()}),
                               make("RunTimes", {0}),
                               make("Settings", {CpuMatMulSettings().fast_math(true)}),
                               make("LhsQInfo", {QuantizationInfo()}),
                               make("RhsQInfo", {QuantizationInfo()}),
                               make("OutQInfo", {QuantizationInfo()})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_bf16);
}

#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
// Native BFLOAT16 data through fixed-format weight kernels. Compile-time
// support is not enough here: the validation is skipped at runtime unless
// the CPU actually reports BF16 support.
FIXTURE_DATA_TEST_CASE(RunTinyFixedFormat,
                       NEMatMulFixedFormatFixture<bfloat16>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::TinyMatMulDataset(),
                               make("TransposeA", {false}),
                               make("TransposeB", {false}),
                               make("DataType", DataType::BFLOAT16),
                               make("ActivationInfo", {ActivationLayerInfo()}),
                               make("RunTimes", {0}),
                               make("Settings", {CpuMatMulSettings().fast_math(true).fixed_format(true)}),
                               make("LhsQInfo", {QuantizationInfo()}),
                               make("RhsQInfo", {QuantizationInfo()}),
                               make("OutQInfo", {QuantizationInfo()})))
{
    if (CPUInfo::get().has_bf16())
    {
        // Validate output
        validate(Accessor(_target), _reference, tolerance_bf16);
    }
}
#endif /* ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS */

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEMatMulFastMathFixture<float>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F32),
                               make("ActivationInfo", {ActivationLayerInfo()}),
                               make("RunTimes", {0}),
                               make("Settings", {CpuMatMulSettings().fast_math(true)}),
                               make("LhsQInfo", {QuantizationInfo()}),
                               make("RhsQInfo", {QuantizationInfo()}),
                               make("OutQInfo", {QuantizationInfo()})))
{
    // Validate output. Large accumulations lose more precision in BF16, so a
    // fraction of mismatching elements is additionally tolerated here.
    validate(Accessor(_target), _reference, tolerance_bf16, 0.01 /* tolerance_num */);
}
TEST_SUITE_END() // BF16
#endif /* ARM_COMPUTE_ENABLE_BF16 */
266
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
// Small shapes, all four transpose combinations, with and without RELU.
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEMatMulFixture<half>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F16),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp16);
}
// Large shapes; nightly only.
FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEMatMulFixture<half>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F16),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp16);
}
// Non-constant tensors, executed NumberOfRuns times per case (see FP32 twin above).
FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
                       NEMatMulDynamicTensorsFixture<half>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::F16),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
}),
make("NumberOfRuns", 5)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp16);
}
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE_END() // Float
322
#ifdef __aarch64__ // All the GeMM CPU assembly kernels for integer datatypes require aarch64
TEST_SUITE(Quantized)

TEST_SUITE(QASYMM8)

// Small shapes with representative asymmetric quantization infos;
// NumberOfExtraRuns re-executes the configured function to check re-run stability.
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEQuantizedMatMulFixture<uint8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
}),
make("NumberOfExtraRuns", {0, 1}),
make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

// Bounded-RELU variants are split out and run nightly on a smaller dataset.
FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
                       NEQuantizedMatMulFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::SmallerMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo",
{
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
}),
make("NumberOfExtraRuns", {0, 1}),
make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

// Large shapes use finer input scales to keep the accumulations in range; nightly only.
FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEQuantizedMatMulFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
}),
make("NumberOfExtraRuns", {0, 1}),
make("LhsQInfo", {QuantizationInfo(1.f / 100, 1)}),
make("RhsQInfo", {QuantizationInfo(1.f / 200, -1)}),
make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}

TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)

// Mirrors the QASYMM8 RunSmall case with signed 8-bit data.
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEQuantizedMatMulFixture<int8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
}),
make("NumberOfExtraRuns", {0, 1}),
make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}

// Bounded-RELU variants, nightly, smaller dataset (signed twin of the case above).
FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
                       NEQuantizedMatMulFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::SmallerMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               make("ActivationInfo",
{
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
}),
make("NumberOfExtraRuns", {0, 1}),
make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}

// Large signed shapes with finer scales; nightly only.
FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEQuantizedMatMulFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulDataset(),
                               make("TransposeA", {false, true}),
                               make("TransposeB", {false, true}),
                               make("DataType", DataType::QASYMM8_SIGNED),
                               make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
}),
make("NumberOfExtraRuns", {0, 1}),
make("LhsQInfo", {QuantizationInfo(1.f / 150, -2)}),
make("RhsQInfo", {QuantizationInfo(1.f / 250, 1)}),
make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}

TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // Quantized
#endif // __aarch64__
Viet-Hoa Do9c7c2d22023-04-11 17:16:27 +0100462
Mohammed Suhail Munshia1b1e412023-03-23 22:21:31 +0000463TEST_SUITE_END() // MatMul
464TEST_SUITE_END() // NEON
465} // namespace validation
466} // namespace test
467} // namespace arm_compute