/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/runtime/CL/CLTensor.h"

#include "src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h"

#include "tests/datasets/MatMulLowpMMULDataset.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/MatMulKernelFixture.h"
#include "tests/validation/reference/Permute.h"

#include <tuple>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
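// A tolerance of 1 (i.e. one quantized step) is assumed here to absorb off-by-one
// rounding differences between the reference requantization and the CL kernel.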
constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
}
using framework::dataset::make;

template <typename T>
using CLMatMulLowpNativeMMULKernelFixture = MatMulKernelValidationFixture<T, ClMatMulLowpNativeMMULKernel, true /* use_mmul */>;

template <typename T>
using CLMatMulLowpNativeMMULKernelWithBiasFixture = MatMulKernelWithBiasValidation<T, ClMatMulLowpNativeMMULKernel, true /* use_mmul */>;
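
// Both fixtures are instantiated with use_mmul = true; they are expected to set
// _device_supports_mmul (cf. arm_matrix_multiply_supported()) so that the test
// bodies below can skip output validation on devices without the
// cl_arm_matrix_multiply extension.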

/** M0 values to test --precommit */
const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 });

/** N0 values to test --precommit */
const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });

/** M0 values to test --nightly */
const auto m0_values_nightly_lhs_nt = framework::dataset::make("M0", { 2, 4, 5, 8 });
const auto m0_values_nightly_lhs_t  = framework::dataset::make("M0", { 2, 4, 8 });

/** N0 values to test --nightly */
const auto n0_values_nightly = framework::dataset::make("N0", { 1, 3, 8, 16 });
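
// K0 is fixed to 4 in every data test case below; as SupportedKernelConfigurations
// verifies, validate() rejects any other K0 for this kernel. The reduced M0 list
// for a transposed Lhs reflects the extra constraint that M0 must then be in
// {1, 2, 3, 4, 8, 16}.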

TEST_SUITE(CL)
TEST_SUITE(MatMulLowpNativeMMULKernel)
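// Hypothetical invocation for running only these suites with the validation
// runner (binary name and --filter option assumed from the usual ACL setup):
//   ./arm_compute_validation --filter=".*MatMulLowpNativeMMULKernel.*"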
TEST_SUITE(Validate)

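// Each entry below pairs a MatMulKernelInfo with the validity expected from
// validate(); the expectation is additionally ANDed with
// arm_matrix_multiply_supported(), so every configuration is expected to be
// rejected on devices without MMUL support.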
TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL)
{
    using MatMulConfigurationPair = std::pair<MatMulKernelInfo, bool>;

    const std::vector<MatMulConfigurationPair> supported_block_sizes =
    {
        // MatMulKernelInfo(adj_lhs, adj_rhs, M0, N0, K0, export_rhs_to_cl_image = false)
        { MatMulKernelInfo(false, false, 0, 1, 4), false },       // M0 should be > 0
        { MatMulKernelInfo(false, true, 3, 5, 4), false },        // N0 not in {1, 2, 3, 4, 8, 16}
        { MatMulKernelInfo(false, false, 3, 6, 4), false },       // N0 not in {1, 2, 3, 4, 8, 16}
        { MatMulKernelInfo(false, false, 3, 3, 8), false },       // K0 must be 4
        { MatMulKernelInfo(true, false, 5, 3, 4), false },        // M0 not in {1, 2, 3, 4, 8, 16} when Lhs is transposed
        { MatMulKernelInfo(false, false, 9, 1, 4), true },
        { MatMulKernelInfo(false, true, 3, 16, 4), true },
        { MatMulKernelInfo(false, false, 7, 3, 4), true },
        { MatMulKernelInfo(true, false, 8, 3, 4), true },
        { MatMulKernelInfo(true, true, 4, 3, 4), true },
        { MatMulKernelInfo(false, false, 7, 3, 4, true), false }, // export to CLImage is unsupported for quantized types
    };

    // Set big enough shapes so that block sizes are not truncated. Also, set all dimensions equal
    // so that it doesn't fail for different NT/T configurations. We aim to test the block sizes here,
    // not the shapes themselves.
    const TensorInfo lhs_info = TensorInfo(TensorShape(64U, 64U), 1, DataType::QASYMM8_SIGNED);
    const TensorInfo rhs_info = TensorInfo(TensorShape(64U, 64U), 1, DataType::QASYMM8_SIGNED);

    for(auto &pair : supported_block_sizes)
    {
        TensorInfo output_info;
        Status     status   = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first);
        const bool expected = (pair.second && arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()));

        ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
    }
}

TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
{
    // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations
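    // Shape convention (inferred from the cases below): Lhs is (K, M), Rhs is (N, K),
    // and the bias is a 1-D tensor of length N, with ACL shapes listed innermost-first.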
    using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>;
    const std::vector<ShapeConfigurationTuple> shape_configurations =
    {
        { TensorShape(32U, 1U), TensorShape(3U, 32U), TensorShape(3U), true },
        { TensorShape(16U, 12U), TensorShape(3U, 16U), TensorShape(3U), true },
        { TensorShape(64U, 4U), TensorShape(2U, 64U), TensorShape(2U), true },
        { TensorShape(16U, 4U), TensorShape(2U, 32U), TensorShape(2U), false },  // Mismatch in the K dimension
        { TensorShape(16U, 0U), TensorShape(2U, 16U), TensorShape(2U), false },  // Invalid dimension
        { TensorShape(32U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 32U, 3U, 4U, 5U, 6U), TensorShape(2U), true },
        { TensorShape(32U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 32U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // No batch broadcasting
        { TensorShape(32U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 32U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // Mismatch in batch dimension
        { TensorShape(32U, 1U), TensorShape(3U, 32U), TensorShape(1U), false },     // Invalid broadcast of bias
        { TensorShape(32U, 1U), TensorShape(3U, 32U), TensorShape(3U, 3U), false }, // 2D bias is invalid
        { TensorShape(12U, 12U), TensorShape(3U, 12U), TensorShape(3U), false },    // K must be a multiple of 16
    };

    for(auto &tuple : shape_configurations)
    {
        const bool expected = (std::get<3>(tuple) && arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()));

        for(bool adj_lhs : { false, true })
        {
            for(bool adj_rhs : { false, true })
            {
                TensorShape lhs_shape = std::get<0>(tuple);
                TensorShape rhs_shape = std::get<1>(tuple);
                TensorShape bia_shape = std::get<2>(tuple);

                if(adj_lhs)
                {
                    permute(lhs_shape, PermutationVector(1U, 0U));
                }

                if(adj_rhs)
                {
                    permute(rhs_shape, PermutationVector(1U, 0U));
                }

                const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED);
                const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED);
                const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::S32);
                TensorInfo       output_info;

                MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 4, false /* export_rhs_to_cl_image */ };

                Status status = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
                ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
            }
        }
    }
}

TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL)
{
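    // Tuple fields (inferred from their use below): Lhs, Rhs, Bias and Output
    // data types, plus the validity expected from validate().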
    using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, DataType, bool>;
    const std::vector<DataTypeConfigurationTuple> data_type_configurations =
    {
        { DataType::F32, DataType::F32, DataType::F32, DataType::F32, false }, // no floating point types
        { DataType::F16, DataType::F16, DataType::F16, DataType::F16, false }, // no floating point types
        { DataType::F64, DataType::F64, DataType::F64, DataType::F64, false }, // no double precision
        { DataType::QASYMM8, DataType::QASYMM8, DataType::S32, DataType::QASYMM8, true },
        { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8_SIGNED, true },
        { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::S32, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported
        { DataType::QASYMM16, DataType::QASYMM16, DataType::S32, DataType::QASYMM16, false },                               // only qasymm8/qasymm8_signed is supported
        { DataType::QSYMM16, DataType::QSYMM16, DataType::S32, DataType::QSYMM16, false },                                  // only qasymm8/qasymm8_signed is supported
        { DataType::QSYMM8, DataType::QSYMM8, DataType::S32, DataType::QSYMM8, false },                                     // only qasymm8/qasymm8_signed is supported
        { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8, false },                           // no mixed data types
        { DataType::S64, DataType::S64, DataType::S64, DataType::S64, false }, // no integral types
        { DataType::S32, DataType::S32, DataType::S32, DataType::S32, false }, // no integral types
        { DataType::S16, DataType::S16, DataType::S16, DataType::S16, false }, // no integral types
        { DataType::S8, DataType::S8, DataType::S8, DataType::S8, false },     // no integral types
        { DataType::U64, DataType::U64, DataType::U64, DataType::U64, false }, // no integral types
        { DataType::U32, DataType::U32, DataType::U32, DataType::U32, false }, // no integral types
        { DataType::U16, DataType::U16, DataType::U16, DataType::U16, false }, // no integral types
        { DataType::U8, DataType::U8, DataType::U8, DataType::U8, false },     // no integral types
        { DataType::QASYMM8, DataType::QASYMM8, DataType::F32, DataType::QASYMM8, false } // only S32 bias is supported
    };

    // It's enough to test a single shape and block size configuration while checking data types
    const TensorShape      shape     = TensorShape(48U, 48U);
    const TensorShape      bia_shape = TensorShape(48U);
    const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 4, false };
    for(auto &tuple : data_type_configurations)
    {
        const bool expected = (std::get<4>(tuple) && arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()));

        const TensorInfo lhs_info(shape, 1, std::get<0>(tuple));
        const TensorInfo rhs_info(shape, 1, std::get<1>(tuple));
        const TensorInfo bia_info(bia_shape, 1, std::get<2>(tuple));
        TensorInfo       output_info(shape, 1, std::get<3>(tuple));

        Status status = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);

        ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
    }
}

TEST_SUITE_END() // Validate

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8_SIGNED)

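// On devices without the MMUL extension, _device_supports_mmul is false and the
// output comparison is skipped, so the cases below effectively pass vacuously there.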
FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULDataset(),
                               make("TransposeA", { false, true }),
                               make("TransposeB", { false, true }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<int8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULWithBiasDataset(),
                               make("TransposeA", { false, true }),
                               make("TransposeB", { false, true }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunLargeLhsNotTransposed, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulLowpMMULDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false, true }),
                               m0_values_nightly_lhs_nt,
                               n0_values_nightly,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulLowpMMULDataset(),
                               make("TransposeA", { true }),
                               make("TransposeB", { false, true }),
                               m0_values_nightly_lhs_t,
                               n0_values_nightly,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

// Running the high-dimensional test for qasymm8_signed alone is enough, because we're stressing the
// number of dimensions here, not the data type or the M0/N0/K0 block sizes.
// It's a good idea to test each Lhs/Rhs T/NT combination, since they are compiled as different CL kernels.
FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::HighDimensionalMatMulLowpMMULDataset(),
                               make("TransposeA", { false, true }),
                               make("TransposeB", { false, true }),
                               make("M0", { 2 }),
                               make("N0", { 2 }),
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8_SIGNED)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE(QASYMM8)

FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULDatasetSubset(),
                               make("TransposeA", { false, true }),
                               make("TransposeB", { false, true }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<uint8_t>,
                       framework::DatasetMode::ALL,
                       combine(datasets::SmallMatMulLowpMMULWithBiasDataset(),
                               make("TransposeA", { false, true }),
                               make("TransposeB", { false, true }),
                               m0_values_precommit,
                               n0_values_precommit,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunLargeLhsNotTransposed, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulLowpMMULDataset(),
                               make("TransposeA", { false }),
                               make("TransposeB", { false, true }),
                               m0_values_nightly_lhs_nt,
                               n0_values_nightly,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(datasets::LargeMatMulLowpMMULDataset(),
                               make("TransposeA", { true }),
                               make("TransposeB", { false, true }),
                               m0_values_nightly_lhs_t,
                               n0_values_nightly,
                               make("K0", { 4 }),
                               make("ExportRhsToCLImage", { false }),
                               make("DataType", DataType::QASYMM8)))
{
    if(_device_supports_mmul)
    {
        // Validate output
        validate(CLAccessor(_target), _reference, tolerance_quant);
    }
}

TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // MatMulLowpNativeMMULKernel
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute