/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/runtime/CL/CLTensor.h"

#include "src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h"

#include "tests/datasets/MatMulLowpMMULDataset.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/MatMulKernelFixture.h"
#include "tests/validation/reference/Permute.h"

#include <tuple>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
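// Note: a tolerance of one quantization step is intended to absorb off-by-one rounding
// differences between the reference implementation and the OpenCL kernel.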
}
using framework::dataset::make;

template <typename T>
using CLMatMulLowpNativeMMULKernelFixture = MatMulKernelValidationFixture<T, ClMatMulLowpNativeMMULKernel, true /* use_mmul */>;

template <typename T>
using CLMatMulLowpNativeMMULKernelWithBiasFixture = MatMulKernelWithBiasValidation<T, ClMatMulLowpNativeMMULKernel, true /* use_mmul */>;

/** M0 values to test --precommit */
const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 });

/** N0 values to test --precommit */
const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });

/** M0 values to test --nightly */
const auto m0_values_nightly_lhs_nt = framework::dataset::make("M0", { 2, 4, 5, 8 });

/** N0 values to test --nightly */
const auto n0_values_nightly_rhs_nt = framework::dataset::make("N0", { 1, 3, 8, 16 });

TEST_SUITE(CL)
TEST_SUITE(MatMulLowpNativeMMULKernel)
TEST_SUITE(Validate)

TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL)
{
    using MatMulConfigurationPair = std::pair<MatMulKernelInfo, bool>;

    const std::vector<MatMulConfigurationPair> supported_block_sizes =
    {
        // MatMulKernelInfo(adj_lhs, adj_rhs, M0, N0, K0, export_rhs_to_cl_image = false)
        // Lhs not-transposed, Rhs not-transposed
        // TODO: Test Cases
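        // An entry pairs a kernel configuration with its expected validation result, e.g.
        // (hypothetical values for illustration, not a confirmed supported configuration):
        //     { MatMulKernelInfo(false, false, 1, 1, 4), true },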
    };

    // Use shapes large enough that the block sizes are not truncated, and keep all dimensions
    // equal so that validation does not fail for different NT/T configurations. The aim here is
    // to test the block sizes, not the shapes themselves.
    const TensorInfo lhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED);
    const TensorInfo rhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED);

    for(auto &pair : supported_block_sizes)
    {
        TensorInfo output_info;
        Status     status   = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first);
        const bool expected = (pair.second && arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()));

        ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
    }
}

TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
{
    // Configurations are given as Nt/Nt and are transposed inside the test to cover the other configurations
    using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>;
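    // Tuple fields: lhs shape, rhs shape, bias shape, expected validation result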
    const std::vector<ShapeConfigurationTuple> shape_configurations =
    {
        { TensorShape(32U, 1U), TensorShape(3U, 32U), TensorShape(3U), true },
        { TensorShape(16U, 12U), TensorShape(3U, 16U), TensorShape(3U), true },
        { TensorShape(64U, 4U), TensorShape(2U, 64U), TensorShape(2U), true },
        { TensorShape(16U, 4U), TensorShape(2U, 32U), TensorShape(2U), false }, // Mismatch in the K dimension
        { TensorShape(16U, 0U), TensorShape(2U, 16U), TensorShape(2U), false }, // Invalid dimension
        { TensorShape(32U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 32U, 3U, 4U, 5U, 6U), TensorShape(2U), true },
        { TensorShape(32U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 32U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // No batch broadcasting
        { TensorShape(32U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 32U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // Mismatch in batch dimension
        { TensorShape(32U, 1U), TensorShape(3U, 32U), TensorShape(1U), false }, // Invalid broadcast of bias
        { TensorShape(32U, 1U), TensorShape(3U, 32U), TensorShape(3U, 3U), false }, // 2D bias is invalid
        { TensorShape(12U, 12U), TensorShape(3U, 12U), TensorShape(3U), false }, // K must be a multiple of 16
    };

    for(auto &tuple : shape_configurations)
    {
        const bool expected = (std::get<3>(tuple) && arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()));

        for(bool adj_lhs : { false, true })
        {
            for(bool adj_rhs : { false, true })
            {
                TensorShape lhs_shape = std::get<0>(tuple);
                TensorShape rhs_shape = std::get<1>(tuple);
                TensorShape bia_shape = std::get<2>(tuple);

                if(adj_lhs)
                {
                    permute(lhs_shape, PermutationVector(1U, 0U));
                }

                if(adj_rhs)
                {
                    permute(rhs_shape, PermutationVector(1U, 0U));
                }

                const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED);
                const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED);
                const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::S32);
                TensorInfo       output_info;

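                // M0 = N0 = 1 with K0 = 4 serves as a minimal block configuration here, so the
                // validation result depends on the shapes under test rather than on block sizes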
                MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 4, false /* export_rhs_to_cl_image */ };

                Status status = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
                ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
            }
        }
    }
}

TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL)
{
    using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, DataType, bool>;
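    // Tuple fields: lhs, rhs, bias and output data types, plus the expected validation result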
    const std::vector<DataTypeConfigurationTuple> data_type_configurations =
    {
        { DataType::F32, DataType::F32, DataType::F32, DataType::F32, false }, // No floating-point types
        { DataType::F16, DataType::F16, DataType::F16, DataType::F16, false }, // No floating-point types
        { DataType::F64, DataType::F64, DataType::F64, DataType::F64, false }, // No double precision
        { DataType::QASYMM8, DataType::QASYMM8, DataType::S32, DataType::QASYMM8, true },
        { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8_SIGNED, true },
        { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::S32, DataType::QSYMM8_PER_CHANNEL, false }, // Only QASYMM8/QASYMM8_SIGNED is supported
        { DataType::QASYMM16, DataType::QASYMM16, DataType::S32, DataType::QASYMM16, false }, // Only QASYMM8/QASYMM8_SIGNED is supported
        { DataType::QSYMM16, DataType::QSYMM16, DataType::S32, DataType::QSYMM16, false }, // Only QASYMM8/QASYMM8_SIGNED is supported
        { DataType::QSYMM8, DataType::QSYMM8, DataType::S32, DataType::QSYMM8, false }, // Only QASYMM8/QASYMM8_SIGNED is supported
        { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8, false }, // No mixed data types
        { DataType::S64, DataType::S64, DataType::S64, DataType::S64, false }, // No plain (non-quantized) integer types
        { DataType::S32, DataType::S32, DataType::S32, DataType::S32, false }, // No plain (non-quantized) integer types
        { DataType::S16, DataType::S16, DataType::S16, DataType::S16, false }, // No plain (non-quantized) integer types
        { DataType::S8, DataType::S8, DataType::S8, DataType::S8, false }, // No plain (non-quantized) integer types
        { DataType::U64, DataType::U64, DataType::U64, DataType::U64, false }, // No plain (non-quantized) integer types
        { DataType::U32, DataType::U32, DataType::U32, DataType::U32, false }, // No plain (non-quantized) integer types
        { DataType::U16, DataType::U16, DataType::U16, DataType::U16, false }, // No plain (non-quantized) integer types
        { DataType::U8, DataType::U8, DataType::U8, DataType::U8, false }, // No plain (non-quantized) integer types
        { DataType::QASYMM8, DataType::QASYMM8, DataType::F32, DataType::QASYMM8, false } // Only S32 bias is supported
    };

    // It's enough to test a single shape and block-size configuration while checking data types
    const TensorShape shape     = TensorShape(48U, 48U);
    const TensorShape bia_shape = TensorShape(48U);
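    // MatMulKernelInfo fields: { adj_lhs, adj_rhs, M0, N0, K0, export_rhs_to_cl_image }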
    const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 4, false };
    for(auto &tuple : data_type_configurations)
    {
        const bool expected = (std::get<4>(tuple) && arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()));

        const TensorInfo lhs_info(shape, 1, std::get<0>(tuple));
        const TensorInfo rhs_info(shape, 1, std::get<1>(tuple));
        const TensorInfo bia_info(bia_shape, 1, std::get<2>(tuple));
        TensorInfo       output_info(shape, 1, std::get<3>(tuple));

        Status status = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);

        ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
    }
}

TEST_SUITE_END() // Validate

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8_SIGNED)

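// Output is only validated when the device supports the MMUL (cl_arm_matrix_multiply)
// extension; see the _device_supports_mmul guards in the test bodies below.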
FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<int8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULWithBiasDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulLowpMMULDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false }),
                               m0_values_nightly_lhs_nt,
                               n0_values_nightly_rhs_nt,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

// Running the high-dimensional test for QASYMM8_SIGNED alone is enough, because it stresses the number of
// dimensions, not the data type or M0/N0/K0. It's a good idea to test each Lhs/Rhs T/NT combination,
// because they map to different CL kernels.
FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::HighDimensionalMatMulLowpMMULDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false }),
                               make("M0", { 2 }),
                               make("N0", { 2 }),
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE(QASYMM8)

FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULDatasetSubset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<uint8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULWithBiasDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulLowpMMULDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false }),
                               m0_values_nightly_lhs_nt,
                               n0_values_nightly_rhs_nt,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // MatMulLowpNativeMMULKernel
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute