blob: 90eee4fb82b4bb788cb0bb27ac3c0a01c0687c61 [file] [log] [blame]
Gunes Bayir9d0c4de2023-04-13 18:22:58 +01001/*
2 * Copyright (c) 2023 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "arm_compute/runtime/CL/CLTensor.h"
26
27#include "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h"
28
29#include "tests/datasets/LargeMatMulDataset.h"
30#include "tests/datasets/SmallMatMulDataset.h"
31#include "tests/framework/Macros.h"
32#include "tests/framework/datasets/Datasets.h"
33#include "tests/validation/Validation.h"
34#include "tests/validation/fixtures/MatMulKernelFixture.h"
35#include "tests/validation/reference/Permute.h"
36
37#include <tuple>
38
39namespace arm_compute
40{
41namespace test
42{
43namespace validation
44{
namespace
{
constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
} // namespace

// Fixture that runs the quantized MatMul kernel without a bias tensor
template <typename T>
using CLMatMulLowpNativeKernelFixture = MatMulKernelValidationFixture<T, ClMatMulLowpNativeKernel>;

// Fixture that runs the quantized MatMul kernel with a bias tensor
template <typename T>
using CLMatMulLowpKernelWithBiasFixture = MatMulKernelWithBiasValidation<T, ClMatMulLowpNativeKernel>;

/** M0 values to test --precommit*/
const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 });

/** N0 values to test --precommit*/
const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });

/** K0 values to test --precommit*/
const auto k0_values_precommit = framework::dataset::make("K0", { 2, 3 });

/** M0 values to test --nightly*/
const auto m0_values_nightly_lhs_nt = framework::dataset::make("M0", { 1, 2, 3, 4, 5, 6, 7, 8 });
const auto m0_values_nightly_lhs_t  = framework::dataset::make("M0", { 1, 2, 3, 4, 8 });

/** N0 values to test --nightly*/
const auto n0_values_nightly_rhs_nt = framework::dataset::make("N0", { 1, 2, 3, 4, 8, 16 });
const auto n0_values_nightly_rhs_t  = framework::dataset::make("N0", { 1, 2, 3, 4, 8 });

/** K0 values to test --nightly*/
// Note: when a side is transposed the kernel supports a narrower K0 set
const auto k0_values_nightly_lhs_nt_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 8, 16 });
const auto k0_values_nightly_rhs_t         = framework::dataset::make("K0", { 1, 2, 3, 4, 8 });
const auto k0_values_nightly_lhs_t_rhs_nt  = framework::dataset::make("K0", { 1, 2, 3, 4, 5, 6, 7, 8 });
77TEST_SUITE(CL)
78TEST_SUITE(MatMulLowpNativeKernel)
79TEST_SUITE(Validate)
80
81TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL)
82{
83 using MatMulConfigurationPair = std::pair<MatMulKernelInfo, bool>;
84
85 const std::vector<MatMulConfigurationPair> supported_block_sizes =
86 {
87 // MatMulKernelInfo(adj_lhs, adj_rhs, M0, N0, K0, export_rhs_to_cl_image = false)
88 // Lhs not-transposed, Rhs-not-transposed
89 { MatMulKernelInfo(false, false, 0, 1, 1), false }, // M0 should be > 0
90 { MatMulKernelInfo(false, false, 3, 5, 1), false }, // N0 not in {1, 2, 3, 4, 8, 16}
91 { MatMulKernelInfo(false, false, 3, 6, 1), false }, // N0 not in {1, 2, 3, 4, 8, 16}
92 { MatMulKernelInfo(false, false, 3, 3, 17), false }, // K0 not in {1, 2, 3, 4, 8, 16}
93 { MatMulKernelInfo(false, false, 3, 3, 7), false }, // K0 not in {1, 2, 3, 4, 8, 16}
94 { MatMulKernelInfo(false, false, 9, 1, 2), true },
95 { MatMulKernelInfo(false, false, 3, 16, 3), true },
96 { MatMulKernelInfo(false, false, 7, 3, 4), true },
97 { MatMulKernelInfo(false, false, 7, 3, 4, true), true }, // export to CLImage is unsupported for quantized types
98 };
99
100 // Set big enough shapes so that block sizes are not truncated. Also, set all dimensions equal
101 // so that it doesn't fail for different NT/T configurations. We aim to test the block sizes here,
102 // not the shapes themselves.
103 const TensorInfo lhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED);
104 const TensorInfo rhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED);
105
106 for(auto &pair : supported_block_sizes)
107 {
108 TensorInfo output_info;
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100109 Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100110
111 ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS);
112 }
113}
114
115TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
116{
117 // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100118 using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>;
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100119 const std::vector<ShapeConfigurationTuple> shape_configurations =
120 {
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100121 { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U), true },
122 { TensorShape(10U, 12U), TensorShape(3U, 10U), TensorShape(3U), true },
123 { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true },
124 { TensorShape(8U, 4U), TensorShape(2U, 5U), TensorShape(2U), false }, // Mismatch in the K dimension
125 { TensorShape(5U, 0U), TensorShape(2U, 5U), TensorShape(2U), false }, // Invalid dimension
126 { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), true },
127 { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // no batch broadcasting
128 { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // mismatch in batch dimension
129 { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(1U), false }, // invalid broadcast of bias
130 { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U, 3U), false }, // 2d bias is invalid
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100131 };
132
133 for(auto &tuple : shape_configurations)
134 {
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100135 const bool expected = std::get<3>(tuple);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100136
137 for(bool adj_lhs :
138 {
139 false, true
140 })
141 {
142 for(bool adj_rhs :
143 {
144 false, true
145 })
146 {
147 TensorShape lhs_shape = std::get<0>(tuple);
148 TensorShape rhs_shape = std::get<1>(tuple);
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100149 TensorShape bia_shape = std::get<2>(tuple);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100150
151 if(adj_lhs)
152 {
153 permute(lhs_shape, PermutationVector(1U, 0U));
154 }
155
156 if(adj_rhs)
157 {
158 permute(rhs_shape, PermutationVector(1U, 0U));
159 }
160
161 const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED);
162 const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED);
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100163 const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::S32);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100164 TensorInfo output_info;
165
166 MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ };
167
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100168 Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100169 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
170 }
171 }
172 }
173}
174
175TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL)
176{
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100177 using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, DataType, bool>;
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100178 const std::vector<DataTypeConfigurationTuple> data_type_configurations =
179 {
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100180 { DataType::F32, DataType::F32, DataType::F32, DataType::F32, false }, // no floating point types
181 { DataType::F16, DataType::F16, DataType::F16, DataType::F16, false }, // no floating point types
182 { DataType::F64, DataType::F64, DataType::F64, DataType::F64, false }, // no double precision
183 { DataType::QASYMM8, DataType::QASYMM8, DataType::S32, DataType::QASYMM8, true },
184 { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8_SIGNED, true },
185 { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::S32, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported
186 { DataType::QASYMM16, DataType::QASYMM16, DataType::S32, DataType::QASYMM16, false }, // only qasymm8/qasymm8_signed is supported
187 { DataType::QSYMM16, DataType::QSYMM16, DataType::S32, DataType::QSYMM16, false }, // only qasymm8/qasymm8_signed is supported
188 { DataType::QSYMM8, DataType::QSYMM8, DataType::S32, DataType::QSYMM8, false }, // only qasymm8/qasymm8_signed is supported
189 { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8, false }, // no mixed data types
190 { DataType::S64, DataType::S64, DataType::S64, DataType::S64, false }, // no integral types
191 { DataType::S32, DataType::S32, DataType::S32, DataType::S32, false }, // no integral types
192 { DataType::S16, DataType::S16, DataType::S16, DataType::S16, false }, // no integral types
193 { DataType::S8, DataType::S8, DataType::S8, DataType::S8, false }, // no integral types
194 { DataType::U64, DataType::U64, DataType::U64, DataType::U64, false }, // no integral types
195 { DataType::U32, DataType::U32, DataType::U32, DataType::U32, false }, // no integral types
196 { DataType::U16, DataType::U16, DataType::U16, DataType::U16, false }, // no integral types
197 { DataType::U8, DataType::U8, DataType::U8, DataType::U8, false }, // no integral types
198 { DataType::QASYMM8, DataType::QASYMM8, DataType::F32, DataType::QASYMM8, false } // Only S32 bias is supported
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100199 };
200
201 // It's enough to test a single shape and block size configuration while checking data types
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100202 const TensorShape shape = TensorShape(10U, 10U);
203 const TensorShape bia_shape = TensorShape(10U);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100204 const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false };
205 for(auto &tuple : data_type_configurations)
206 {
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100207 const bool expected = std::get<4>(tuple);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100208
209 const TensorInfo lhs_info(shape, 1, std::get<0>(tuple));
210 const TensorInfo rhs_info(shape, 1, std::get<1>(tuple));
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100211 const TensorInfo bia_info(bia_shape, 1, std::get<2>(tuple));
212 TensorInfo output_info(shape, 1, std::get<3>(tuple));
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100213
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100214 Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100215 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
216 }
217}
218
TEST_SUITE_END() // Validate

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8_SIGNED)
// Precommit: tiny shapes, all four transpose combinations, small M0/N0/K0 sweep, no bias
FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(),
                                                                                                              framework::dataset::make("TransposeA", { true, false })),
                                                                                                              framework::dataset::make("TransposeB", { true, false })),
                                                                                                              m0_values_precommit),
                                                                                                              n0_values_precommit),
                                                                                                              k0_values_precommit),
                                                                                                              framework::dataset::make("ExportRhsToCLImage", { false })),
                                                                                                              framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
// Precommit: small shapes, all four transpose combinations, small M0/N0/K0 sweep, no bias
FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                                                                                                               framework::dataset::make("TransposeA", { true, false })),
                                                                                                               framework::dataset::make("TransposeB", { true, false })),
                                                                                                               m0_values_precommit),
                                                                                                               n0_values_precommit),
                                                                                                               k0_values_precommit),
                                                                                                               framework::dataset::make("ExportRhsToCLImage", { false })),
                                                                                                               framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
// Precommit: same as RunSmall but exercising the bias-addition path of the kernel
FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpKernelWithBiasFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                                                                                                                    framework::dataset::make("TransposeA", { true, false })),
                                                                                                                    framework::dataset::make("TransposeB", { true, false })),
                                                                                                                    m0_values_precommit),
                                                                                                                    n0_values_precommit),
                                                                                                                    k0_values_precommit),
                                                                                                                    framework::dataset::make("ExportRhsToCLImage", { false })),
                                                                                                                    framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100259FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::NIGHTLY,
260 combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
261 framework::dataset::make("TransposeA", { false })),
262 framework::dataset::make("TransposeB", { false })),
263 m0_values_nightly_lhs_nt),
264 n0_values_nightly_rhs_nt),
265 k0_values_nightly_lhs_nt_rhs_nt),
266 framework::dataset::make("ExportRhsToCLImage", { false })),
267 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
268{
269 // Validate output
270 validate(CLAccessor(_target), _reference, tolerance_quant);
271}
Jakub Sujak5e99a3e2023-04-18 08:33:56 +0100272FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::NIGHTLY,
273 combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
274 framework::dataset::make("TransposeA", { false })),
275 framework::dataset::make("TransposeB", { true })),
276 m0_values_nightly_lhs_nt),
277 n0_values_nightly_rhs_t),
278 k0_values_nightly_rhs_t),
279 framework::dataset::make("ExportRhsToCLImage", { false })),
280 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
281{
282 // Validate output
283 validate(CLAccessor(_target), _reference, tolerance_quant);
284}
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100285FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::NIGHTLY,
286 combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
287 framework::dataset::make("TransposeA", { true })),
288 framework::dataset::make("TransposeB", { false })),
289 m0_values_nightly_lhs_t),
290 n0_values_nightly_rhs_nt),
291 k0_values_nightly_lhs_t_rhs_nt),
292 framework::dataset::make("ExportRhsToCLImage", { false })),
293 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
294{
295 // Validate output
296 validate(CLAccessor(_target), _reference, tolerance_quant);
297}
Omar Al Khatib467daef2023-04-13 14:56:23 +0100298FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::NIGHTLY,
299 combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
300 framework::dataset::make("TransposeA", { true })),
301 framework::dataset::make("TransposeB", { true })),
302 m0_values_nightly_lhs_t),
303 n0_values_nightly_rhs_t),
304 k0_values_nightly_rhs_t),
305 framework::dataset::make("ExportRhsToCLImage", { false })),
306 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
307{
308 // Validate output
309 validate(CLAccessor(_target), _reference, tolerance_quant);
310}
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100311// Running High Dimensional test is enough for qasymm8_signed, because we're stressing the number of dimensions, not data type or M0/N0/K0
312// It's a good idea to test for each Lhs/Rhs T/NT combinations because they're different CL kernels
313FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulLowpNativeKernelFixture<int8_t>, framework::DatasetMode::ALL,
314 combine(combine(combine(combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(),
315 framework::dataset::make("TransposeA", { true, false })),
Jakub Sujak5e99a3e2023-04-18 08:33:56 +0100316 framework::dataset::make("TransposeB", { true, false })),
Omar Al Khatib467daef2023-04-13 14:56:23 +0100317 framework::dataset::make("M0", { 2 })),
318 framework::dataset::make("N0", { 2 })),
319 framework::dataset::make("K0", { 2 })),
320 framework::dataset::make("ExportRhsToCLImage", { false })),
321 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)))
322{
323 // Validate output
324 validate(CLAccessor(_target), _reference, tolerance_quant);
325}
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100326TEST_SUITE_END() // QASYMM8_SIGNED
327
328TEST_SUITE(QASYMM8)
329FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(),
330 framework::dataset::make("TransposeA", { true, false })),
Jakub Sujak5e99a3e2023-04-18 08:33:56 +0100331 framework::dataset::make("TransposeB", { true, false })),
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100332 m0_values_precommit),
333 n0_values_precommit),
334 k0_values_precommit),
335 framework::dataset::make("ExportRhsToCLImage", { false })),
336 framework::dataset::make("DataType", DataType::QASYMM8)))
337{
338 // Validate output
339 validate(CLAccessor(_target), _reference, tolerance_quant);
340}
341FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
342 framework::dataset::make("TransposeA", { true, false })),
Jakub Sujak5e99a3e2023-04-18 08:33:56 +0100343 framework::dataset::make("TransposeB", { true, false })),
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100344 m0_values_precommit),
345 n0_values_precommit),
346 k0_values_precommit),
347 framework::dataset::make("ExportRhsToCLImage", { false })),
348 framework::dataset::make("DataType", DataType::QASYMM8)))
349{
350 // Validate output
351 validate(CLAccessor(_target), _reference, tolerance_quant);
352}
// Nightly: large shapes, one fixture per transpose combination (unsigned variant);
// the M0/N0/K0 sets mirror the QASYMM8_SIGNED nightly cases above
FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
                                                                               framework::dataset::make("TransposeA", { false })),
                                                                       framework::dataset::make("TransposeB", { false })),
                                                               m0_values_nightly_lhs_nt),
                                                       n0_values_nightly_rhs_nt),
                                               k0_values_nightly_lhs_nt_rhs_nt),
                                       framework::dataset::make("ExportRhsToCLImage", { false })),
                               framework::dataset::make("DataType", DataType::QASYMM8)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
                                                                               framework::dataset::make("TransposeA", { false })),
                                                                       framework::dataset::make("TransposeB", { true })),
                                                               m0_values_nightly_lhs_nt),
                                                       n0_values_nightly_rhs_t),
                                               k0_values_nightly_rhs_t),
                                       framework::dataset::make("ExportRhsToCLImage", { false })),
                               framework::dataset::make("DataType", DataType::QASYMM8)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
                                                                               framework::dataset::make("TransposeA", { true })),
                                                                       framework::dataset::make("TransposeB", { false })),
                                                               m0_values_nightly_lhs_t),
                                                       n0_values_nightly_rhs_nt),
                                               k0_values_nightly_lhs_t_rhs_nt),
                                       framework::dataset::make("ExportRhsToCLImage", { false })),
                               framework::dataset::make("DataType", DataType::QASYMM8)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulLowpNativeKernelFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(),
                                                                               framework::dataset::make("TransposeA", { true })),
                                                                       framework::dataset::make("TransposeB", { true })),
                                                               m0_values_nightly_lhs_t),
                                                       n0_values_nightly_rhs_t),
                                               k0_values_nightly_rhs_t),
                                       framework::dataset::make("ExportRhsToCLImage", { false })),
                               framework::dataset::make("DataType", DataType::QASYMM8)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // MatMulLowpNativeKernel
TEST_SUITE_END() // CL
409} // namespace validation
410} // namespace test
411} // namespace arm_compute