/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
#include "src/cpu/kernels/CpuGemmMatrixMultiplyKernel.h"
#include "src/cpu/kernels/CpuGemmTranspose1xWKernel.h"
#include "src/cpu/operators/CpuGemm.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeGEMMDataset.h"
#include "tests/datasets/SmallGEMMDataset.h"
#include "tests/datasets/TinyGEMMDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMFixture.h"
#include "tests/validation/fixtures/GEMMInterleave4x4Fixture.h"
#include "tests/validation/fixtures/GEMMTranspose1xWFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<half_float::half> rel_tolerance_f16(half(0.2)); /**< Relative tolerance value for comparing reference's output against implementation's output for FP16 data types */
const AbsoluteTolerance<float>      abs_tolerance_f16(0.2f);      /**< Absolute tolerance value for comparing reference's output against implementation's output for FP16 data types */
constexpr float                     tolerance_num = 0.07f;        /**< Maximum allowed fraction of mismatched elements when validating FP16 results */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    DataType::F32,
});

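// Note: make(name, start, end) builds a range dataset, generating the values in the half-open interval [start, end)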
const auto data_interleave = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12);
const auto data_transpose  = framework::dataset::make("M", 8, 14) * framework::dataset::make("N", 7, 14);

/** Zero padding test: configure the function on unpadded tensors and check that no padding has been introduced */
template <typename FunctionType>
bool validate_zero_padding(unsigned int dim0_value, unsigned int dim1_value)
{
    const TensorShape in_shape(dim0_value, dim1_value);
    TensorInfo        in(in_shape, 1, DataType::U32);
    TensorInfo        dst;

    ARM_COMPUTE_EXPECT(in.is_resizable(), framework::LogLevel::ERRORS);

    // Validate zero-padding
    FunctionType func;

    func.configure(&in, &dst);

    return in.padding().empty();
}

/* Zero padding test for GEMM kernels */
bool validate_gemm_zero_padding(const TensorShape shape0, const TensorShape shape1)
{
    // Create tensors
    TensorInfo in0(shape0, 1, DataType::F32);
    TensorInfo in1(shape1, 1, DataType::F32);
    TensorInfo dst;

    // Validate zero-padding
    cpu::kernels::CpuGemmMatrixMultiplyKernel gemm;
    gemm.configure(&in0, &in1, &dst, 1.0, false);

    return in0.padding().empty() && in1.padding().empty() && dst.padding().empty();
}
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(GEMM)

/** Test case for memory injection in @ref cpu::CpuGemm.
 *
 * Configure the operator once and inject memory at run-time in multiple executions.
 *
 * Checks performed in order:
 * - Both runs compute the same output
 */
TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
{
    auto       gemm      = std::make_unique<cpu::CpuGemm>();
    const auto lhs_info  = TensorInfo(TensorShape(3U, 3U), 1, DataType::F32);
    const auto rhs_info  = TensorInfo(TensorShape(4U, 3U), 1, DataType::F32);
    const auto c_info    = TensorInfo(TensorShape(4U, 3U), 1, DataType::F32);
    auto       dst_info  = TensorInfo(TensorShape(4U, 3U), 1, DataType::F32);
    const auto gemm_info = GEMMInfo{};
    gemm->configure(&lhs_info, &rhs_info, &c_info, &dst_info, 1.f, 1.f, gemm_info);

    // The LHS, RHS and C tensors are created and allocated once, then shared by every execution
    auto lhs = create_tensor<Tensor>(lhs_info);
    auto rhs = create_tensor<Tensor>(rhs_info);
    auto c   = create_tensor<Tensor>(c_info);
    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    c.allocator()->allocate();

    ITensorPack run_pack{ { TensorType::ACL_SRC_0, &lhs }, { TensorType::ACL_SRC_1, &rhs }, { TensorType::ACL_SRC_2, &c } };
    ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &rhs }, { TensorType::ACL_SRC_2, &c } };

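    // Allocate the auxiliary workspace tensors requested by the operator and register them in the packs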
    auto mg = MemoryGroup{};
    auto ws = manage_workspace<Tensor>(gemm->workspace(), mg, run_pack, prep_pack);

    auto run_conv = [&]() -> Tensor
    {
        auto dst = create_tensor<Tensor>(dst_info);
        dst.allocator()->allocate();
        run_pack.add_tensor(TensorType::ACL_DST, &dst);

        library->fill_tensor_value(Accessor(lhs), 1.f);
        library->fill_tensor_value(Accessor(rhs), 2.f);
        library->fill_tensor_value(Accessor(c), 3.f);
        // This operator is configured once and captured by this lambda.
        gemm->prepare(prep_pack);
        gemm->run(run_pack);
        return dst;
    };
    auto result_0 = run_conv();
    auto result_1 = run_conv();
    for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
    {
        ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
    }
}

/** Test case for memory injection in @ref NEGEMM.
 *
 * Make sure @ref NEGEMM still works through injecting the memory at configure time using the old API.
 *
 * Checks performed in order:
 * - Both runs compute the same output
 */
TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
{
    auto       gemm      = std::make_unique<NEGEMM>();
    const auto lhs_info  = TensorInfo(TensorShape(3U, 3U), 1, DataType::F32);
    const auto rhs_info  = TensorInfo(TensorShape(4U, 3U), 1, DataType::F32);
    const auto c_info    = TensorInfo(TensorShape(4U, 3U), 1, DataType::F32);
    auto       dst_info  = TensorInfo(TensorShape(4U, 3U), 1, DataType::F32);
    const auto gemm_info = GEMMInfo{};
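    // Unlike the test above, tensors are newly created and memory is bound via configure() on every call of this lambda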
    auto run_conv = [&]()
    {
        auto lhs = create_tensor<Tensor>(lhs_info);
        auto rhs = create_tensor<Tensor>(rhs_info);
        auto c   = create_tensor<Tensor>(c_info);
        auto dst = create_tensor<Tensor>(dst_info);
        gemm->configure(&lhs, &rhs, &c, &dst, 1.f, 1.f, gemm_info);
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        c.allocator()->allocate();
        dst.allocator()->allocate();
        library->fill_tensor_value(Accessor(lhs), 1.f);
        library->fill_tensor_value(Accessor(rhs), 2.f);
        library->fill_tensor_value(Accessor(c), 3.f);
        gemm->run();
        return dst;
    };
    auto result_0 = run_conv();
    auto result_1 = run_conv();
    for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
    {
        ARM_COMPUTE_EXPECT(reinterpret_cast<float *>(result_0.buffer())[i] == reinterpret_cast<float *>(result_1.buffer())[i], framework::LogLevel::ERRORS);
    }
}

TEST_SUITE(TRANSPOSE_1XW)
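// Synthetize a standalone runnable function (with a zero constant border) around the kernel so the fixtures below can exercise it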
using CpuGemmTranspose1xW = NESynthetizeFunctionWithZeroConstantKernelBorder<cpu::kernels::CpuGemmTranspose1xWKernel>;
DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
                   framework::dataset::make("N", { 1, 23, 63, 101 }),
                   framework::dataset::make("K", { 1, 47, 29, 27 })),
               n_value, k_value)
{
    bool status = validate_zero_padding<CpuGemmTranspose1xW>(n_value, k_value);
    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
}

TEST_SUITE(U32)
using CpuGemmTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, CpuGemmTranspose1xW, uint32_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U32))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U32

TEST_SUITE(U16)
using CpuGemmTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, CpuGemmTranspose1xW, uint16_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U16))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U16

TEST_SUITE(U8)
using CpuGemmTranspose1xWFixture = GEMMTranspose1xWValidationFixture<Tensor, Accessor, CpuGemmTranspose1xW, uint8_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * framework::dataset::make("DataType", DataType::U8))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U8

TEST_SUITE_END() // TRANSPOSE_1XW

TEST_SUITE(INTERLEAVE_4X4)
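// As above, synthetize a standalone function with a zero constant border around the interleave kernel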
using CpuGemmInterleave4x4 = NESynthetizeFunctionWithZeroConstantKernelBorder<cpu::kernels::CpuGemmInterleave4x4Kernel>;

DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
                   framework::dataset::make("M", { 1, 23, 63, 101 }),
                   framework::dataset::make("K", { 1, 47, 29, 27 })),
               m_value, k_value)
{
    bool status = validate_zero_padding<cpu::kernels::CpuGemmInterleave4x4Kernel>(m_value, k_value);
    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
}

TEST_SUITE(U32)
using CpuGemmInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, CpuGemmInterleave4x4, uint32_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::U32))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U32

TEST_SUITE(U16)
using CpuGemmInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, CpuGemmInterleave4x4, uint16_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::U16))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U16

TEST_SUITE(U8)
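// QASYMM8 shares the uint8_t storage format; the interleave kernel only rearranges data, so the U8 fixture covers it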
using CpuGemmInterleave4x4Fixture = GEMMInterleave4x4ValidationFixture<Tensor, Accessor, CpuGemmInterleave4x4, uint8_t>;
FIXTURE_DATA_TEST_CASE(RunSmall, CpuGemmInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * framework::dataset::make("DataType", DataType::QASYMM8))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U8

TEST_SUITE_END() // INTERLEAVE_4X4

template <typename T>
using NEGEMMFixture = GEMMValidationFixture<Tensor, Accessor, NEGEMM, T>;

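// The trailing "true" template argument is expected to run GEMM with the C (bias) input disabled in the fixture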
template <typename T>
using NEGEMMFixtureDisabledC = GEMMValidationFixture<Tensor, Accessor, NEGEMM, T, true>;

TEST_SUITE(Float)
DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
                   framework::dataset::make("In0", { TensorShape(21U, 13U),
                                                     TensorShape(31U, 1U),
                                                     TensorShape(31U, 1U),
                                                     TensorShape(8U, 2U),
                                                     TensorShape(38U, 12U),
                                                     TensorShape(32U, 1U)
                                                   }),
                   framework::dataset::make("In1", { TensorShape(33U, 21U),
                                                     TensorShape(23U, 31U),
                                                     TensorShape(23U, 31U),
                                                     TensorShape(16U, 8U),
                                                     TensorShape(21U, 38U),
                                                     TensorShape(17U, 32U)
                                                   })),
               shape0, shape1)
{
    bool status = validate_gemm_zero_padding(shape0, shape1);
    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallGEMMDataset(),
                                                                                                         framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                 framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeGEMMDataset(),
                                                                                                       framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                               framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallGEMMDataset(),
                                                                                                          framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                  framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeGEMMDataset(),
                                                                                                        framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f);
}
TEST_SUITE(DisabledC)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixtureDisabledC<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallGEMMDataset(),
                                                                                                                   framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                           framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f);
}
TEST_SUITE_END() // DisabledC

TEST_SUITE(BatchedGEMMDisabledC)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixtureDisabledC<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallBatchedGEMMDataset(),
                                                                                                                   framework::dataset::make("ReshapeWeights", { true, false })),
                                                                                                           framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f);
}
TEST_SUITE_END() // BatchedGEMMDisabledC

TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

TEST_SUITE_END() // GEMM
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute