/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

// Create function for CLGEMMMatrixMultiplyNativeKernel
using CLGEMMMatrixMultiplyNative = CLSynthetizeFunction<CLGEMMMatrixMultiplyNativeKernel>;
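// CLSynthetizeFunction (from tests/CL/Helper.h) wraps a single kernel in a minimal
// function-style interface (configure + run), so the kernel can be exercised directly
// by the fixtures and test cases below.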

// Fixture for CLGEMMMatrixMultiplyNative
template <typename T>
using CLGEMMMatrixMultiplyNativeFixture = GEMMMatrixMultiplyNativeValidationFixture<CLTensor, CLAccessor, T, CLGEMMMatrixMultiplyNative>;

// Fixture for CLGEMMMatrixMultiplyNative3D
template <typename T>
using CLGEMMMatrixMultiplyNative3DFixture = GEMMMatrixMultiplyNative3DValidationFixture<CLTensor, CLAccessor, T, CLGEMMMatrixMultiplyNative>;

namespace
{
// *INDENT-OFF*
// clang-format off
RelativeTolerance<float> rel_tolerance_f32(0.001f);
constexpr float abs_tolerance_f32(0.0001f);
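// Note on the validate() calls below: an element is accepted if it matches the reference
// within rel_tolerance_f32, or within abs_tolerance_f32 in absolute terms (which guards
// values near zero); the 0.f argument in between is the tolerated fraction of mismatches.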

/** Alpha values to test - Precommit */
const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} );

/** Beta values to test - Precommit */
const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} );

/** M values to test */
const auto m_values = framework::dataset::make("M", 37);

/** M_W values to test */
const auto m_w_values = framework::dataset::make("M_W", 5);

/** M_H values to test */
const auto m_h_values = framework::dataset::make("M_H", 7);

/** N values to test */
const auto n_values = framework::dataset::make("N", 51);

/** K values to test */
const auto k_values = framework::dataset::make("K", 23);

/** Batch size values to test */
const auto b_values = framework::dataset::make("batch_size", 1, 3);
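// Note: the (start, end) overload of framework::dataset::make generates the half-open
// range [start, end), so batch sizes 1 and 2 are tested here; the same applies to the
// H0 and nightly M0 ranges below.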

/** Activation values to test */
const auto act_values = framework::dataset::make("Activation",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
});
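// Note: LU_BOUNDED_RELU(a = 8.f, b = 2.f) is the lower/upper bounded rectifier, clamping
// each element to [b, a], i.e. min(a, max(b, x)); the default-constructed
// ActivationLayerInfo above means no activation is applied.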
96
giuros01b3204e72019-04-01 13:50:22 +010097/** M0 values to test - Precommit */
Gian Marco Iodiced820db62019-08-05 14:23:23 +010098const auto m0_values_precommit = framework::dataset::make("M0", { 4, 6 });
giuros01b3204e72019-04-01 13:50:22 +010099
100/** N0 values to test - Precommit */
Gian Marco Iodiced820db62019-08-05 14:23:23 +0100101const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
giuros01b3204e72019-04-01 13:50:22 +0100102
103/** K0 values to test - Precommit */
104const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
105
106/** H0 values to test - Precommit */
107const auto h0_values_precommit = framework::dataset::make("H0", 1, 3);
108
109/** M0 values to test - Nightly */
110const auto m0_values_nightly = framework::dataset::make("M0", 1, 8);
111
112/** N0 values to test - Nightly */
113const auto n0_values_nightly = framework::dataset::make("N0", { 2, 3, 4, 8 });
114
115/** K0 values to test - Nightly */
116const auto k0_values_nightly = framework::dataset::make("K0", { 2, 3, 4, 8 });
117
Gian Marco Iodice944170e2019-06-24 14:40:30 +0100118/** Broadcast bias from vector to matrix */
Gian Marco Iodiced820db62019-08-05 14:23:23 +0100119const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
Gian Marco Iodice944170e2019-06-24 14:40:30 +0100120
/** Boundary handling cases for testing partial/non-partial (full) block dimensions, resulting from different combinations
 * of M, M0, N and N0 values.
 * M0 and N0 are kept constant, while the different test cases need to vary M and N.
 *
 * E.g. M = 64 and N = 33 result in a block dimension that has no partial blocks (all full blocks) in the Y dimension and
 * partial blocks in the X dimension (see the illustration after this dataset).
 */
const auto boundary_handling_cases = combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                    // Large K to force potential out-of-bound reads on input0
                                    framework::dataset::make("K", 315),
                                    // Batch size == 1 to force potential out-of-bound reads on input0
                                    framework::dataset::make("batch_size", 1)),
                                    framework::dataset::make("M0", 4)),
                                    framework::dataset::make("N0", 4)),
                                    framework::dataset::make("K0", 4)),
                                    // Only need to test F32 as F16 shares identical boundary handling logic
                                    framework::dataset::make("DataType", DataType::F32)),
                                    framework::dataset::make("alpha", -0.75f )),
                                    framework::dataset::make("beta", -0.35f )),
                                    broadcast_bias_values),
                                    framework::dataset::make("Activation", ActivationLayerInfo()));

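// Illustration (not part of the test framework): with M0 = N0 = 4 fixed as above, whether
// a block dimension is partial follows from simple ceil-division block counting, sketched
// here under that assumption:
//
//   const unsigned int num_blocks_y = (M + M0 - 1) / M0; // M = 64 -> 16 blocks, all full (64 % 4 == 0)
//   const unsigned int num_blocks_x = (N + N0 - 1) / N0; // N = 33 -> 9 blocks, last one partial (33 % 4 == 1)
//
// A dimension has a partial block exactly when M % M0 (respectively N % N0) is non-zero.
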
/** Configuration test */
void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
{
    const unsigned int M = m_value;
    const unsigned int N = n_value;
    const unsigned int K = k_value;

    GEMMLHSMatrixInfo lhs_info;
    lhs_info.m0 = m0_value;
    lhs_info.k0 = k0_value;

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0 = n0_value;
    rhs_info.k0 = k0_value;

    GEMMKernelInfo kernel_info;
    kernel_info.m               = M;
    kernel_info.n               = N;
    kernel_info.k               = K;
    kernel_info.broadcast_bias  = broadcast_bias;
    kernel_info.activation_info = act_info;

    const TensorShape lhs_shape(K, M, b_value);
    const TensorShape rhs_shape(N, K, b_value);
    const TensorShape bias_shape(N,
                                 broadcast_bias? 1 : M,
                                 broadcast_bias? 1 : b_value);
    const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
                                                   TensorInfo(rhs_shape, 1, data_type),
                                                   kernel_info);

    // Create tensors
    CLTensor lhs  = create_tensor<CLTensor>(lhs_shape, data_type);
    CLTensor rhs  = create_tensor<CLTensor>(rhs_shape, data_type);
    CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type);
    CLTensor dst  = create_tensor<CLTensor>(dst_shape, data_type);

    ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMMatrixMultiplyNative gemm;
    gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
}
/** Zero padding test */
bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
{
    const unsigned int M = m_value;
    const unsigned int N = n_value;
    const unsigned int K = k_value;

    GEMMLHSMatrixInfo lhs_info;
    lhs_info.m0 = m0_value;
    lhs_info.k0 = k0_value;

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0 = n0_value;
    rhs_info.k0 = k0_value;

    GEMMKernelInfo kernel_info;
    kernel_info.m               = M;
    kernel_info.n               = N;
    kernel_info.k               = K;
    kernel_info.broadcast_bias  = broadcast_bias;
    kernel_info.activation_info = act_info;

    const TensorShape lhs_shape(K, M, b_value);
    const TensorShape rhs_shape(N, K, b_value);
    const TensorShape bias_shape(N,
                                 broadcast_bias? 1 : M,
                                 broadcast_bias? 1 : b_value);
    const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
                                                   TensorInfo(rhs_shape, 1, data_type),
                                                   kernel_info);

    // Create tensors
    CLTensor lhs  = create_tensor<CLTensor>(lhs_shape, data_type);
    CLTensor rhs  = create_tensor<CLTensor>(rhs_shape, data_type);
    CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type);
    CLTensor dst  = create_tensor<CLTensor>(dst_shape, data_type);

    ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    CLGEMMMatrixMultiplyNative gemm;
    gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);

    // Padding may legitimately be added along the X dimension of rhs and bias, so only
    // require that dst and lhs are unpadded and that bias has no padding in Y
    return dst.info()->padding().empty() && lhs.info()->padding().empty() && bias.info()->padding().bottom == 0 && bias.info()->padding().top == 0;
}
} // namespace

TEST_SUITE(CL)
TEST_SUITE(GEMMMatrixMultiplyNative)
TEST_SUITE(Float)
TEST_SUITE(FP32)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(
                                                                   m_values,
                                                                   n_values),
                                                                   k_values),
                                                                   framework::dataset::make("batch_size", 1)),
                                                                   m0_values_precommit),
                                                                   n0_values_precommit),
                                                                   k0_values_precommit),
                                                                   broadcast_bias_values),
                                                                   act_values),
               m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, act_value)
{
    validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, DataType::F32, act_value);
}

/** Validate zero padding tests
 *
 * A series of validation tests to check that no padding is added as part of configuration for 6 different scenarios.
 *
 * Checks performed in order:
 * - No partial blocks in both x and y dimensions
 * - Partial blocks in x dimension
 * - Partial blocks in y dimension
 * - Partial blocks in both x and y dimensions
 * - No blocks in both x and y dimensions, scalar store (N0==1)
 * - Special case: partial_n0 == 5 (vstore1 should be invoked instead of vstore_partial_1)
 */
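// For reference, the dataset rows below map to the scenarios above in order:
//   (M = 24,  N = 48,  M0 = 4, N0 = 4)  -> full blocks in both dimensions
//   (M = 64,  N = 29,  M0 = 8, N0 = 4)  -> partial blocks in x only  (29 % 4 == 1)
//   (M = 101, N = 16,  M0 = 7, N0 = 16) -> partial blocks in y only  (101 % 7 == 3)
//   (M = 1,   N = 122, M0 = 2, N0 = 3)  -> partial blocks in both    (1 % 2 == 1, 122 % 3 == 2)
//   (M = 50,  N = 20,  M0 = 1, N0 = 1)  -> scalar store (N0 == 1)
//   (M = 256, N = 21,  M0 = 8, N0 = 8)  -> partial_n0 == 5           (21 % 8 == 5)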
DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("M",  { 24, 64, 101, 1, 50, 256, }),
framework::dataset::make("N",  { 48, 29, 16, 122, 20, 21, })),
framework::dataset::make("M0", { 4, 8, 7, 2, 1, 8, })),
framework::dataset::make("N0", { 4, 4, 16, 3, 1, 8, })),
m_value, n_value, m0_value, n0_value)
{
    bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, false, DataType::F32, ActivationLayerInfo());
    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
}

FIXTURE_DATA_TEST_CASE(RunSmallBoundaryHandlingPartialInXPartialInY, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
                combine(combine(
                        framework::dataset::make("M", 3),
                        framework::dataset::make("N", 1)),
                        boundary_handling_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmallBoundaryHandlingPartialInXFullInY, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
                combine(combine(
                        framework::dataset::make("M", 64),
                        framework::dataset::make("N", 51)),
                        boundary_handling_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmallBoundaryHandlingFullInXFullInY, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
                combine(combine(
                        framework::dataset::make("M", 64),
                        framework::dataset::make("N", 32)),
                        boundary_handling_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmallBoundaryHandlingFullInXPartialInY, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
                combine(combine(
                        framework::dataset::make("M", 37),
                        framework::dataset::make("N", 32)),
                        boundary_handling_cases))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                   m_values,
                                                                   n_values),
                                                                   k_values),
                                                                   b_values),
                                                                   m0_values_precommit),
                                                                   n0_values_precommit),
                                                                   k0_values_precommit),
                                                                   framework::dataset::make("DataType", DataType::F32)),
                                                                   a_values),
                                                                   beta_values),
                                                                   broadcast_bias_values),
                                                                   act_values))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::DISABLED,
                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                   m_values,
                                                                   n_values),
                                                                   k_values),
                                                                   b_values),
                                                                   m0_values_nightly),
                                                                   n0_values_nightly),
                                                                   k0_values_nightly),
                                                                   framework::dataset::make("DataType", DataType::F32)),
                                                                   a_values),
                                                                   beta_values),
                                                                   broadcast_bias_values),
                                                                   act_values))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture<float>, framework::DatasetMode::ALL,
                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                   m_w_values,
                                                                   m_h_values),
                                                                   n_values),
                                                                   k_values),
                                                                   b_values),
                                                                   m0_values_precommit),
                                                                   n0_values_precommit),
                                                                   k0_values_precommit),
                                                                   framework::dataset::make("DataType", DataType::F32)),
                                                                   a_values),
                                                                   beta_values),
                                                                   act_values))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyNative3DFixture<float>, framework::DatasetMode::DISABLED,
                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                   m_w_values,
                                                                   m_h_values),
                                                                   n_values),
                                                                   k_values),
                                                                   b_values),
                                                                   m0_values_nightly),
                                                                   n0_values_nightly),
                                                                   k0_values_nightly),
                                                                   framework::dataset::make("DataType", DataType::F32)),
                                                                   a_values),
                                                                   beta_values),
                                                                   act_values))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // GEMMMatrixMultiplyNative
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute