blob: 53038c8177ea9007dba2e3b040d8f4ea63384015 [file] [log] [blame]
Gian Marco Iodiceadc53952019-02-15 11:10:31 +00001/*
Gian Marco Iodice10e88a72021-11-29 12:49:19 +00002 * Copyright (c) 2019-2022 Arm Limited.
Gian Marco Iodiceadc53952019-02-15 11:10:31 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Gian Marco Iodice7026b302019-06-26 17:18:11 +010024#include "arm_compute/core/KernelDescriptors.h"
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000025#include "arm_compute/core/Types.h"
SiCongLi31778612021-11-12 17:33:45 +000026#include "arm_compute/core/experimental/PostOps.h"
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000027#include "arm_compute/core/utils/misc/ShapeCalculator.h"
28#include "arm_compute/runtime/CL/CLTensor.h"
29#include "arm_compute/runtime/CL/CLTensorAllocator.h"
Georgios Pinitas7891a732021-08-20 21:39:25 +010030#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h"
31#include "src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000032#include "tests/CL/CLAccessor.h"
33#include "tests/CL/Helper.h"
34#include "tests/PaddingCalculator.h"
35#include "tests/datasets/ShapeDatasets.h"
36#include "tests/framework/Asserts.h"
37#include "tests/framework/Macros.h"
38#include "tests/framework/datasets/Datasets.h"
39#include "tests/validation/Validation.h"
40#include "tests/validation/fixtures/GEMMFixture.h"
41
42namespace arm_compute
43{
44namespace test
45{
46namespace validation
47{
48using namespace arm_compute::misc::shape_calculator;
Georgios Pinitas856f66e2021-04-22 21:13:21 +010049using namespace arm_compute::opencl::kernels;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000050
Georgios Pinitas856f66e2021-04-22 21:13:21 +010051// Create function for ClGemmReshapeRhsMatrixKernel
52using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<ClGemmReshapeRhsMatrixKernel>;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000053
Georgios Pinitas856f66e2021-04-22 21:13:21 +010054// Create function for ClGemmMatrixMultiplyReshapedOnlyRhsKernel
55using CLGEMMMatrixMultiplyReshapedOnlyRHS = CLSynthetizeOperator<ClGemmMatrixMultiplyReshapedOnlyRhsKernel>;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000056
57// Fixture for CLGEMMMatrixMultiplyReshapedOnlyRHS
58template <typename T>
59using CLGEMMMatrixMultiplyReshapedOnlyRHSFixture = GEMMMatrixMultiplyReshapedOnlyRHSValidationFixture<CLTensor, CLAccessor, T, CLGEMMReshapeRHSMatrix, CLGEMMMatrixMultiplyReshapedOnlyRHS>;
60
61// Fixture for CLGEMMMatrixMultiplyReshapedOnlyRHS3D
62template <typename T>
63using CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture = GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture<CLTensor, CLAccessor, T, CLGEMMReshapeRHSMatrix, CLGEMMMatrixMultiplyReshapedOnlyRHS>;
64
SiCongLiafa19722021-10-24 19:12:33 +010065// Fixture for CLGEMMMatrixMultiplyReshapedOnlyRHS with post ops
66template <typename T>
67using CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture =
68 GEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsValidationFixture<CLTensor, CLAccessor, T, CLGEMMReshapeRHSMatrix, CLGEMMMatrixMultiplyReshapedOnlyRHS>;
69
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000070namespace
71{
72// *INDENT-OFF*
73// clang-format off
74RelativeTolerance<float> rel_tolerance_f32(0.001f);
75constexpr float abs_tolerance_f32(0.0001f);
76
Gian Marco Iodice781cba72020-06-19 16:56:57 +010077RelativeTolerance<float> rel_tolerance_f16(0.001f);
78constexpr float abs_tolerance_f16(0.01f);
79
Sheri Zhang1a378102020-04-30 12:59:39 +010080/** Alpha values to test */
81const auto a_values = framework::dataset::make("alpha", {-0.75f} );
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000082
Sheri Zhang1a378102020-04-30 12:59:39 +010083/** Beta values to test */
Gian Marco Iodice6f931342020-09-15 14:17:41 +010084const auto beta_values = framework::dataset::make("beta", {-0.35f} );
Georgios Pinitasb0f342e2019-05-21 13:32:43 +010085
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000086/** M values to test */
87const auto m_values = framework::dataset::make("M", 37);
88
89/** M_W values to test */
90const auto m_w_values = framework::dataset::make("M_W", 5);
91
92/** M_H values to test */
93const auto m_h_values = framework::dataset::make("M_H", 7);
94
95/** N values to test */
96const auto n_values = framework::dataset::make("N", 51);
97
98/** K values to test */
99const auto k_values = framework::dataset::make("K", 23);
100
101/** Batch size values to test */
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100102const auto b_values = framework::dataset::make("batch_size", 2);
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000103
Gian Marco Iodiceca1f4602019-07-16 15:46:48 +0100104/** Activation values to test */
105const auto act_values = framework::dataset::make("Activation",
106{
Giorgio Arena2ab585b2021-02-25 15:41:49 +0000107 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 10.f),
Gian Marco Iodice635013a2022-11-03 09:30:56 +0000108 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ELU),
Gian Marco Iodiceca1f4602019-07-16 15:46:48 +0100109});
110
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100111/** M0 values to test - precommit */
112const auto m0_values_precommit = framework::dataset::make("M0", { 4 });
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000113
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100114/** N0 values to test - precommit*/
115const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000116
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100117/** K0 values to test - precommit*/
118const auto k0_values_precommit = framework::dataset::make("K0", { 4 });
119
120/** M0 values to test - nightly */
121const auto m0_values_nightly = framework::dataset::make("M0", { 8 });
122
123/** N0 values to test - nightly */
124const auto n0_values_nightly = framework::dataset::make("N0", { 16 });
125
126/** K0 values to test - nightly */
127const auto k0_values_nightly = framework::dataset::make("K0", { 16 });
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000128
Sheri Zhang1a378102020-04-30 12:59:39 +0100129/** H0 values to test */
130const auto h0_values = framework::dataset::make("H0", 1, 3);
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000131
132/** Interleave values to test with RHS matrix */
133const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, false });
134
135/** Transpose values to test with RHS matrix */
Gian Marco Iodiceba5e0962019-03-11 12:17:44 +0000136const auto t_values_rhs = framework::dataset::make("transpose_rhs", { true, false });
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000137
Gian Marco Iodicee16c8902019-06-14 16:11:10 +0100138/** Broadcast bias from vector to matrix */
Gian Marco Iodiced820db62019-08-05 14:23:23 +0100139const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );
Georgios Pinitasb0f342e2019-05-21 13:32:43 +0100140
SiCong Lib972ae62020-08-03 15:39:45 +0100141/** Boundary handling cases for testing partial/non-partial (full) block dimensions, resulting from different combinations
142 * of M, M0, N and N0 values.
143 * M0 and N0 are kept constant, while the different test cases need to vary M and N.
144 *
145 * Eg. M = 64 and N = 33 result in a block dimension that has no partial blocks (all full blocks) in Y dimension and
146 * parital blocks in X dimension.
147 */
148const auto boundary_handling_cases = combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
149 // Large k to force potential out-of-bound reads on input0
150 framework::dataset::make("K", 315),
151 // Batch size == 1 to force potential out-of-bound reads on input0
152 framework::dataset::make("batch_size", 1)),
153 framework::dataset::make("M0", 4)),
154 framework::dataset::make("N0", 4)),
155 framework::dataset::make("K0", 4)),
156 framework::dataset::make("H0", 3)),
157 i_values_rhs),
158 t_values_rhs),
159 framework::dataset::make("export_to_cl_image_rhs", {true, false})),
160 // Only need to test F32 as F16 shares identical boundary handling logics
161 framework::dataset::make("DataType", DataType::F32)),
Ramy Elgammal451c3092022-02-01 23:01:27 +0000162 framework::dataset::make("alpha", -0.75f )),
163 framework::dataset::make("beta", -0.35f )),
SiCong Lib972ae62020-08-03 15:39:45 +0100164 broadcast_bias_values),
165 framework::dataset::make("Activation", ActivationLayerInfo()));
166
SiCongLiafa19722021-10-24 19:12:33 +0100167/** Post Ops */
168using PostOpArgBroadcast = CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture<float>::PostOpArgBroadcast;
169experimental::PostOpList<PostOpArgBroadcast> post_ops_1()
170{
171 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
172 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
173 post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
Ramy Elgammal451c3092022-02-01 23:01:27 +0000174 std::make_tuple(true, true, false), // If broadcast in dims 0, 1 and 2
SiCongLiafa19722021-10-24 19:12:33 +0100175 0,
176 ConvertPolicy::SATURATE);
177 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
178 return post_ops;
179}
180experimental::PostOpList<PostOpArgBroadcast> post_ops_2()
181{
182 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
183 post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
Ramy Elgammal451c3092022-02-01 23:01:27 +0000184 std::make_tuple(false, true, true), // If broadcast in dims 0, 1 and 2
SiCongLiafa19722021-10-24 19:12:33 +0100185 1,
186 ConvertPolicy::SATURATE);
187 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
188 return post_ops;
189}
190experimental::PostOpList<PostOpArgBroadcast> post_ops_3()
191{
192 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
Ramy Elgammal451c3092022-02-01 23:01:27 +0000193 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
SiCongLiafa19722021-10-24 19:12:33 +0100194 post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
Ramy Elgammal451c3092022-02-01 23:01:27 +0000195 std::make_tuple(false, false, true), // If broadcast in dims 0, 1 and 2
SiCongLiafa19722021-10-24 19:12:33 +0100196 1,
197 ConvertPolicy::SATURATE);
198 return post_ops;
199}
Ramy Elgammal451c3092022-02-01 23:01:27 +0000200// To test that the output of the main op is the first parameter in prelu post op
201experimental::PostOpList<PostOpArgBroadcast> post_ops_4()
202{
203 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
204 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
205 post_ops.push_back_op<experimental::PostOpEltwisePRelu<PostOpArgBroadcast>>(
206 std::make_tuple(false, false, true), // If true, broadcast in corresponding dim: 0, 1 or 2
207 0,
208 ConvertPolicy::SATURATE);
209 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
210 return post_ops;
211}
212// To test that the output of the main op is the second parameter in prelu post op i.e. it is the alpha_param
213experimental::PostOpList<PostOpArgBroadcast> post_ops_5()
214{
215 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
216 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
217 post_ops.push_back_op<experimental::PostOpEltwisePRelu<PostOpArgBroadcast>>(
218 std::make_tuple(false, false, false), // If true, broadcast in corresponding dim: 0, 1 or 2
219 1,
220 ConvertPolicy::SATURATE);
221 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
222 return post_ops;
223}
SiCongLiafa19722021-10-24 19:12:33 +0100224/** Different Post Op Lists */
225const auto post_op_lists = framework::dataset::make("post_op_lists", {
226 post_ops_1(),
227 post_ops_2(),
Ramy Elgammal451c3092022-02-01 23:01:27 +0000228 post_ops_3(),
229 post_ops_4(),
230 post_ops_5()
SiCongLiafa19722021-10-24 19:12:33 +0100231 } );
232
233 bool is_post_op_list_valid(unsigned int m, unsigned int n, unsigned int k, unsigned int batch, DataType data_type, const experimental::PostOpList<ITensorInfo*>& post_ops)
234{
235 const auto lhs_info = GEMMLHSMatrixInfo(4,4,1,false,true);
236 const auto rhs_info = GEMMRHSMatrixInfo(4,4,1,true,true,false);
237
238 // Create TensorInfo for post op arguments
239 TensorInfo input0_info(TensorShape(k, m, batch), 1, data_type);
240 TensorInfo input1_info(TensorShape(n, k, batch), 1, data_type);
241 TensorInfo input2_info(TensorShape(n), 1, data_type);
242 TensorInfo output_info(TensorShape(n, m, batch), 1, data_type);
243
244 const TensorInfo reshaped_input1_info = input1_info.clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(input1_info, rhs_info));
245
246 GEMMKernelInfo gemm_info(m, n, k, 0 /**< Depth of the output tensor in case is reinterpreted as 3D */,
247 false /**< reinterpret the input as 3D */,
248 true /**< Flag used to broadcast the bias addition */,
249 false /**< wider accumm */,
250 false /**< has pad y */,
251 ActivationLayerInfo::ActivationFunction::IDENTITY,
252 1 /**< Multiplication factor for the width of the 1xW transposed block */,
253 1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
254 lhs_info,
255 rhs_info,
256 0 /**< Offset to be added to each element of the matrix A */,
257 0 /**< Offset to be added to each element of the matrix B */,
258 post_ops);
259 return bool(ClGemmMatrixMultiplyReshapedOnlyRhsKernel::validate(&input0_info.clone()->set_is_resizable(true),
260 &reshaped_input1_info.clone()->set_is_resizable(true),
261 &input2_info.clone()->set_is_resizable(true),
262 &output_info.clone()->set_is_resizable(true),1.f,1.f,
263 lhs_info,
264 rhs_info,
265 gemm_info));
266}
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000267/** Configuration test */
Sheri Zhang1a378102020-04-30 12:59:39 +0100268bool validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value,
269 unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value,
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100270 bool i_value_rhs, bool t_value_rhs, bool export_to_cl_image, bool broadcast_bias, bool input_as_3d, unsigned int depth_output_gemm3d, const ActivationLayerInfo &act_info,
Sheri Zhang1a378102020-04-30 12:59:39 +0100271 DataType dt_input0, DataType dt_input1, DataType dt_input2, DataType dt_output, float alpha, float beta)
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000272{
273 const unsigned int M = m_value;
274 const unsigned int N = n_value;
275 const unsigned int K = k_value;
276
277 GEMMLHSMatrixInfo lhs_info;
278 lhs_info.m0 = m0_value;
279 lhs_info.k0 = k0_value;
280
281 GEMMRHSMatrixInfo rhs_info;
282 rhs_info.n0 = n0_value;
283 rhs_info.k0 = k0_value;
284 rhs_info.h0 = h0_value;
285 rhs_info.interleave = i_value_rhs;
Gian Marco Iodiceba5e0962019-03-11 12:17:44 +0000286 rhs_info.transpose = t_value_rhs;
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100287 rhs_info.export_to_cl_image = export_to_cl_image;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000288
Gian Marco Iodice7026b302019-06-26 17:18:11 +0100289 GEMMKernelInfo kernel_info;
290 kernel_info.m = M;
291 kernel_info.n = N;
292 kernel_info.k = K;
Sheri Zhang1a378102020-04-30 12:59:39 +0100293 kernel_info.depth_output_gemm3d = depth_output_gemm3d;
294 kernel_info.reinterpret_input_as_3d = input_as_3d;
Gian Marco Iodice7026b302019-06-26 17:18:11 +0100295 kernel_info.broadcast_bias = broadcast_bias;
Gian Marco Iodiceca1f4602019-07-16 15:46:48 +0100296 kernel_info.activation_info = act_info;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000297
298 const TensorShape lhs_shape(K, M, b_value);
299 const TensorShape rhs_shape(N, K, b_value);
Sheri Zhang1a378102020-04-30 12:59:39 +0100300 const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, dt_input1),
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000301 rhs_info);
302
Sheri Zhang1a378102020-04-30 12:59:39 +0100303 const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, dt_input0),
304 TensorInfo(rhs_shape_reshaped, 1, dt_input1),
Gian Marco Iodice7026b302019-06-26 17:18:11 +0100305 kernel_info);
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000306
Gian Marco Iodicee16c8902019-06-14 16:11:10 +0100307 const TensorShape bias_shape(N,
Sheri Zhang1a378102020-04-30 12:59:39 +0100308 M, // Correct calculation should be: broadcast_bias? 1 : M, it's wrong here on purpose just for validation test
Gian Marco Iodicee16c8902019-06-14 16:11:10 +0100309 broadcast_bias? 1 : b_value);
310
Sheri Zhang1a378102020-04-30 12:59:39 +0100311 // Create tensor info
312 TensorInfo lhs = TensorInfo(lhs_shape, 1, dt_input0);
313 TensorInfo rhs_reshaped = TensorInfo(rhs_shape_reshaped, 1, dt_input1);
314 TensorInfo bias = TensorInfo(bias_shape, 1, dt_input2);
315 TensorInfo dst = TensorInfo(dst_shape, 1, dt_output);
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000316
317 // Create and configure function
318 CLGEMMMatrixMultiplyReshapedOnlyRHS gemm;
Sheri Zhang1a378102020-04-30 12:59:39 +0100319 return bool(gemm.validate(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info));
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000320}
SiCongLiafa19722021-10-24 19:12:33 +0100321
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000322} // namespace
323
324TEST_SUITE(CL)
325TEST_SUITE(GEMMMatrixMultiplyReshapedOnlyRHS)
Sheri Zhang1a378102020-04-30 12:59:39 +0100326
/** Validate tests
 *
 * A series of validation tests on configurations which according to the API specification
 * the function should fail against.
 *
 * Checks performed in order:
 * - Mismatching data type: input1, input2 and output need to have same data type as input0. Supported data types: F32/F16.
 * - Unsupported M0: M0 can only be 1,2,3,4,5,6,7,8
 * - Unsupported N0: N0 can only be 2,3,4,8,16
 * - Unsupported K0: K0 can only be 2,3,4,8,16
 * - Unsupported bias addition: bias broadcast mode is 0 if the input or output has to be reinterpreted as 3D
 * - Incorrect bias dimension when bias broadcast mode is 1 and beta is not 0.0f, should be (n, 1), not (n, m)
 * - Incorrect input0 dimension when input is reinterpreted as 3D: input0->dimension(1) * input0->dimension(2) != m
 * - Correct support for creating an OpenCL image object from buffer
 * - Incorrect support for creating an OpenCL image object from buffer. N0 is 2 but it can only be 4,8 and 16
 * - Correct F16 support for creating an OpenCL image object from buffer.
 */
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100344DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
345framework::dataset::make("batch_size", { 1, 1, 1, 1, 1, 1, 2, 1, 1, 1 }),
346framework::dataset::make("M0", { 4, 9, 4, 4, 4, 4, 4, 4, 4, 4 })),
347framework::dataset::make("N0", { 4, 4, 18, 4, 4, 4, 4, 8, 2, 8 })),
348framework::dataset::make("K0", { 4, 4, 4, 1, 4, 4, 4, 4, 4, 4 })),
349framework::dataset::make("broadcast_bias", { false, false, false, false, false, true, true, false, false, false })),
350framework::dataset::make("input_as_3d", { 0, 0, 0, 0, 1, 0, 1, 0, 0, 0 })),
351framework::dataset::make("depth_output_gemm3d", { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 })),
352framework::dataset::make("export_to_cl_image", { false, false, false, false, false, false, false, true, true, true })),
353framework::dataset::make("data_type_input0", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
354framework::dataset::make("data_type_input1", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
355framework::dataset::make("data_type_input2", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
356framework::dataset::make("data_type_output", { DataType::F16, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
357framework::dataset::make("Beta", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f , 1.0f})),
SiCong Li5bdde852020-08-26 13:55:15 +0100358framework::dataset::make("Expected", { false, false, false, false, false, false, false, true, false, true })),
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100359b_value, m0_value, n0_value, k0_value, broadcast_bias, input_as_3d, depth_output_gemm3d, export_to_cl_image, dt_input0, dt_intpu1, dt_input2, dt_output, beta, expected)
Sheri Zhang1a378102020-04-30 12:59:39 +0100360{
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100361 bool expected_value = expected;
362
363 // Change expected to false if the target platform does not support the OpenCL cl_khr_image2d_from_buffer extension
364 if(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()) && export_to_cl_image)
365 {
366 expected_value = false;
367 }
368
369 bool status = validate_configuration(37, 51, 23, b_value, m0_value, n0_value, k0_value, 1, false, false, export_to_cl_image, broadcast_bias, input_as_3d, depth_output_gemm3d, ActivationLayerInfo(), dt_input0, dt_intpu1, dt_input2, dt_output, 1.0f, beta);
370 ARM_COMPUTE_EXPECT(status == expected_value, framework::LogLevel::ERRORS);
Sheri Zhang1a378102020-04-30 12:59:39 +0100371}
372
SiCongLiafa19722021-10-24 19:12:33 +0100373TEST_SUITE(ValidateFusedPostOpsConfigs)
374TEST_SUITE(Invalid)
375TEST_CASE(UnsupportedPostOpSequence, framework::DatasetMode::ALL)
376{
377 const auto data_type = DataType::F32;
378 const unsigned int m = 17;
379 const unsigned int n = 1;
380 const unsigned int k = 13;
381 const unsigned int batch = 2;
382 TensorShape post_op_arg0_shape(n, m, batch);
383 TensorInfo post_op_arg_info(post_op_arg0_shape, 1, data_type);
384 auto post_op_arg1_info = post_op_arg_info.clone();
385
386 // Unsupported sequence of post ops
387 experimental::PostOpList<ITensorInfo*> post_ops{};
388 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>(
389 &post_op_arg_info,
390 1,
391 ConvertPolicy::SATURATE);
392 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>(
393 post_op_arg1_info.get(),
394 0,
395 ConvertPolicy::SATURATE);
396
397 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
398}
399TEST_CASE(OutputWidened, framework::DatasetMode::ALL)
400{
401 // Invalid broadcast: post op tensors "widen" the output tensor
402 const auto data_type = DataType::F32;
403 const unsigned int m = 17;
404 const unsigned int n = 1;
405 const unsigned int k = 1;
406 const unsigned int batch = 1;
407 TensorShape post_op_arg_shape(n, m, batch + 4); // output's batch dimension is "widened", which is not allowed
408 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
409 experimental::PostOpList<ITensorInfo*> post_ops{};
410 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
411
412 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
413}
414TEST_CASE(BroadcastInXDimOnly, framework::DatasetMode::ALL)
415{
416 // Invalid broadcast: post op tensors broadcast in the first dimension (X) only
417 const auto data_type = DataType::F32;
418 const unsigned int m = 22;
419 const unsigned int n = 16;
420 const unsigned int k = 15;
421 const unsigned int batch = 3;
422 TensorShape post_op_arg_shape(1, m, batch);
423 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
424 experimental::PostOpList<ITensorInfo*> post_ops{};
425 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
426
427 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
428}
429TEST_SUITE_END() // Invalid
430TEST_SUITE(Valid)
431TEST_CASE(EmptyPostOpList, framework::DatasetMode::ALL)
432{
433 const auto data_type = DataType::F32;
434 const unsigned int m = 22;
435 const unsigned int n = 16;
436 const unsigned int k = 15;
437 const unsigned int batch = 3;
438 experimental::PostOpList<ITensorInfo*> post_ops{};
439
440 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
441}
442TEST_CASE(BroadcastInYDimOnly, framework::DatasetMode::ALL)
443{
444 const auto data_type = DataType::F32;
445 const unsigned int m = 22;
446 const unsigned int n = 16;
447 const unsigned int k = 15;
448 const unsigned int batch = 3;
449 TensorShape post_op_arg_shape(n, 1, batch);
450 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
451 experimental::PostOpList<ITensorInfo*> post_ops{};
452 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
453
454 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
455}
456TEST_CASE(BroadcastInBothXandYDims, framework::DatasetMode::ALL)
457{
458 const auto data_type = DataType::F32;
459 const unsigned int m = 22;
460 const unsigned int n = 16;
461 const unsigned int k = 15;
462 const unsigned int batch = 3;
463 TensorShape post_op_arg_shape(1, 1, batch);
464 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
465 experimental::PostOpList<ITensorInfo*> post_ops{};
466 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
467
468 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
469}
Ramy Elgammal451c3092022-02-01 23:01:27 +0000470TEST_CASE(BroadcastInAllDims, framework::DatasetMode::ALL)
471{
472 const auto data_type = DataType::F32;
473 const unsigned int m = 22;
474 const unsigned int n = 16;
475 const unsigned int k = 15;
476 const unsigned int batch = 3;
477 TensorShape post_op_arg_shape(1, 1, 1);
478 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
479 experimental::PostOpList<ITensorInfo*> post_ops{};
480 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
SiCongLiafa19722021-10-24 19:12:33 +0100481
Ramy Elgammal451c3092022-02-01 23:01:27 +0000482 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
483}
SiCongLiafa19722021-10-24 19:12:33 +0100484TEST_SUITE_END() // Valid
485TEST_SUITE_END() // ValidateFusedPostOps
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000486TEST_SUITE(Float)
487TEST_SUITE(FP32)
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000488
SiCong Lib972ae62020-08-03 15:39:45 +0100489FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingPartialInXPartialInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
490 combine(combine(
491 framework::dataset::make("M", 3),
492 framework::dataset::make("N", 1)),
493 boundary_handling_cases))
494{
495 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000496 if(validate_result)
497 {
498 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
499 }
500 else
501 {
502 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
503 framework::ARM_COMPUTE_PRINT_INFO();
504 }
SiCong Lib972ae62020-08-03 15:39:45 +0100505}
506
507FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingPartialInXFullInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
508 combine(combine(
509 framework::dataset::make("M", 64),
510 framework::dataset::make("N", 43)),
511 boundary_handling_cases))
512{
513 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000514 if(validate_result)
515 {
516 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
517 }
518 else
519 {
520 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
521 framework::ARM_COMPUTE_PRINT_INFO();
522 }
SiCong Lib972ae62020-08-03 15:39:45 +0100523}
524
525FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingFullInXFullInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
526 combine(combine(
527 framework::dataset::make("M", 64),
528 framework::dataset::make("N", 32)),
529 boundary_handling_cases))
530{
531 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000532 if(validate_result)
533 {
534 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
535 }
536 else
537 {
538 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
539 framework::ARM_COMPUTE_PRINT_INFO();
540 }
SiCong Lib972ae62020-08-03 15:39:45 +0100541}
542
543FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingFullInXPartialInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
544 combine(combine(
545 framework::dataset::make("M", 37),
546 framework::dataset::make("N", 32)),
547 boundary_handling_cases))
548{
549 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000550 if(validate_result)
551 {
552 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
553 }
554 else
555 {
556 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
557 framework::ARM_COMPUTE_PRINT_INFO();
558 }
SiCong Lib972ae62020-08-03 15:39:45 +0100559}
560
// FP32 precommit sweep over the Cartesian product of GEMM shapes (M/N/K/batch),
// precommit block sizes (m0/n0/k0), RHS reshape parameters (h0, interleave,
// transpose) and both RHS storage paths (OpenCL buffer and cl_image), plus
// alpha/beta, broadcast-bias and fused-activation variants.
FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_values,
                           n_values),
                           k_values),
                           b_values),
                           m0_values_precommit),
                           n0_values_precommit),
                           k0_values_precommit),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           // Exercise both the plain-buffer and the cl_image RHS paths.
                           framework::dataset::make("export_to_cl_image_rhs", {false, true})),
                           framework::dataset::make("DataType", DataType::F32)),
                           a_values),
                           beta_values),
                           broadcast_bias_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
    }
    else
    {
        // Report a skip (not a failure) when the extension required by the
        // cl_image export path is unavailable on the target device.
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
591
// FP32 nightly sweep: same structure as the precommit test above, but using the
// larger nightly m0/n0/k0 block-size datasets for broader coverage.
FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_values,
                           n_values),
                           k_values),
                           b_values),
                           m0_values_nightly),
                           n0_values_nightly),
                           k0_values_nightly),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           // Exercise both the plain-buffer and the cl_image RHS paths.
                           framework::dataset::make("export_to_cl_image_rhs", {false, true})),
                           framework::dataset::make("DataType", DataType::F32)),
                           a_values),
                           beta_values),
                           broadcast_bias_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
    }
    else
    {
        // Report a skip (not a failure) when the extension required by the
        // cl_image export path is unavailable on the target device.
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
622
// FP32 precommit sweep for the 3D-output variant of the kernel: the output M
// dimension is split into width/height (m_w_values x m_h_values), and the
// has_pad_y axis toggles Y-padding on the input/output tensors.
FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_w_values,
                           m_h_values),
                           n_values),
                           k_values),
                           b_values),
                           m0_values_precommit),
                           n0_values_precommit),
                           k0_values_precommit),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           // Exercise both the plain-buffer and the cl_image RHS paths.
                           framework::dataset::make("export_to_cl_image_rhs", {false, true})),
                           framework::dataset::make("has_pad_y", {false, true})),
                           framework::dataset::make("DataType", DataType::F32)),
                           a_values),
                           beta_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
    }
    else
    {
        // Report a skip (not a failure) when the extension required by the
        // cl_image export path is unavailable on the target device.
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
654
// FP32 nightly sweep for the 3D-output variant: same structure as RunPrecommit3D
// above, but with the larger nightly m0/n0/k0 block-size datasets.
FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_w_values,
                           m_h_values),
                           n_values),
                           k_values),
                           b_values),
                           m0_values_nightly),
                           n0_values_nightly),
                           k0_values_nightly),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           // Exercise both the plain-buffer and the cl_image RHS paths.
                           framework::dataset::make("export_to_cl_image_rhs", {false, true})),
                           framework::dataset::make("has_pad_y", {false, true})),
                           framework::dataset::make("DataType", DataType::F32)),
                           a_values),
                           beta_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
    }
    else
    {
        // Report a skip (not a failure) when the extension required by the
        // cl_image export path is unavailable on the target device.
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
SiCongLiafa19722021-10-24 19:12:33 +0100686
687TEST_SUITE(FusedPostOps)
688
// FP32 test of the kernel with fused post-ops (post_op_lists appended to the
// dataset). H0 is pinned to 1 and interleave_rhs to true to keep the RHS-reshape
// axis small; broadcast_bias is fixed to false.
// NOTE(review): the case is named "RunPrecommit" but uses DatasetMode::ALL, so it
// runs in every mode — confirm whether PRECOMMIT was intended.
FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_values,
                           n_values),
                           k_values),
                           b_values),
                           m0_values_precommit),
                           n0_values_precommit),
                           k0_values_precommit),
                           framework::dataset::make("H0", {1})),
                           framework::dataset::make("interleave_rhs", { true })),
                           t_values_rhs),
                           // Exercise both the plain-buffer and the cl_image RHS paths.
                           framework::dataset::make("export_to_cl_image_rhs", {false, true})),
                           framework::dataset::make("DataType", DataType::F32)),
                           a_values),
                           beta_values),
                           framework::dataset::make("broadcast_bias", { false } )),
                           act_values),
                           post_op_lists)
                       )
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
    }
    else
    {
        // Report a skip (not a failure) when the extension required by the
        // cl_image export path is unavailable on the target device.
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
721
722TEST_SUITE_END() // FusedPostOps
723
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000724TEST_SUITE_END() // FP32
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100725
726TEST_SUITE(FP16)
// FP16 precommit sweep. Unlike the FP32 variants, export_to_cl_image_rhs is fixed
// to true, so only the cl_image RHS path is exercised here; on devices without
// cl_khr_image2d_from_buffer every configuration is reported as skipped.
FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_values,
                           n_values),
                           k_values),
                           b_values),
                           m0_values_precommit),
                           n0_values_precommit),
                           k0_values_precommit),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           framework::dataset::make("export_to_cl_image_rhs", true)),
                           framework::dataset::make("DataType", DataType::F16)),
                           a_values),
                           beta_values),
                           broadcast_bias_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        // FP16 uses looser tolerances than FP32 due to half-precision accumulation error.
        validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
    }
    else
    {
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
757
// FP16 nightly sweep: same structure as the FP16 precommit test above, but with
// the larger nightly m0/n0/k0 block-size datasets. Only the cl_image RHS path is
// exercised (export_to_cl_image_rhs fixed to true).
FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_values,
                           n_values),
                           k_values),
                           b_values),
                           m0_values_nightly),
                           n0_values_nightly),
                           k0_values_nightly),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           framework::dataset::make("export_to_cl_image_rhs", true)),
                           framework::dataset::make("DataType", DataType::F16)),
                           a_values),
                           beta_values),
                           broadcast_bias_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        // FP16 uses looser tolerances than FP32 due to half-precision accumulation error.
        validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
    }
    else
    {
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
788
// FP16 precommit sweep for the 3D-output variant: output M split into
// width/height (m_w_values x m_h_values) and has_pad_y toggled. Only the
// cl_image RHS path is exercised (export_to_cl_image_rhs fixed to true).
FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_w_values,
                           m_h_values),
                           n_values),
                           k_values),
                           b_values),
                           m0_values_precommit),
                           n0_values_precommit),
                           k0_values_precommit),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           framework::dataset::make("export_to_cl_image_rhs", true)),
                           framework::dataset::make("has_pad_y", {false, true})),
                           framework::dataset::make("DataType", DataType::F16)),
                           a_values),
                           beta_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        // FP16 uses looser tolerances than FP32 due to half-precision accumulation error.
        validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
    }
    else
    {
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
820
// FP16 nightly sweep for the 3D-output variant: same structure as RunPrecommit3D
// above, but with the larger nightly m0/n0/k0 block-size datasets. Only the
// cl_image RHS path is exercised (export_to_cl_image_rhs fixed to true).
FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_w_values,
                           m_h_values),
                           n_values),
                           k_values),
                           b_values),
                           m0_values_nightly),
                           n0_values_nightly),
                           k0_values_nightly),
                           h0_values),
                           i_values_rhs),
                           t_values_rhs),
                           framework::dataset::make("export_to_cl_image_rhs", true)),
                           framework::dataset::make("has_pad_y", {false, true})),
                           framework::dataset::make("DataType", DataType::F16)),
                           a_values),
                           beta_values),
                           act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        // FP16 uses looser tolerances than FP32 due to half-precision accumulation error.
        validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
    }
    else
    {
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
SiCongLiafa19722021-10-24 19:12:33 +0100852TEST_SUITE(FusedPostOps)
853
// FP16 test of the kernel with fused post-ops (post_op_lists appended to the
// dataset). H0 is pinned to 1 and interleave_rhs to true; broadcast_bias is fixed
// to false; only the cl_image RHS path is exercised (export_to_cl_image_rhs true).
// NOTE(review): the case is named "RunPrecommit" but uses DatasetMode::ALL, so it
// runs in every mode — confirm whether PRECOMMIT was intended.
FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                           m_values,
                           n_values),
                           k_values),
                           b_values),
                           m0_values_precommit),
                           n0_values_precommit),
                           k0_values_precommit),
                           framework::dataset::make("H0", {1})),
                           framework::dataset::make("interleave_rhs", { true })),
                           t_values_rhs),
                           framework::dataset::make("export_to_cl_image_rhs", true)),
                           framework::dataset::make("DataType", DataType::F16)),
                           a_values),
                           beta_values),
                           framework::dataset::make("broadcast_bias", { false } )),
                           act_values),
                           post_op_lists)
                       )
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        // FP16 uses looser tolerances than FP32 due to half-precision accumulation error.
        validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
    }
    else
    {
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
886
887TEST_SUITE_END() // FusedPostOps
888
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100889TEST_SUITE_END() // FP16
890
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000891TEST_SUITE_END() // Float
TEST_SUITE_END() // GEMMMatrixMultiplyReshapedOnlyRHS
893TEST_SUITE_END() // CL
894} // namespace validation
895} // namespace test
Georgios Pinitasb0f342e2019-05-21 13:32:43 +0100896} // namespace arm_compute