blob: ca63d3a679dddc6d347dc385255bf799181ba45c [file] [log] [blame]
Gian Marco Iodiceadc53952019-02-15 11:10:31 +00001/*
Giorgio Arena2ab585b2021-02-25 15:41:49 +00002 * Copyright (c) 2019-2021 Arm Limited.
Gian Marco Iodiceadc53952019-02-15 11:10:31 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Gian Marco Iodice7026b302019-06-26 17:18:11 +010024#include "arm_compute/core/KernelDescriptors.h"
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000025#include "arm_compute/core/Types.h"
SiCongLid5694c92021-11-12 17:33:45 +000026#include "arm_compute/core/experimental/PostOps.h"
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000027#include "arm_compute/core/utils/misc/ShapeCalculator.h"
28#include "arm_compute/runtime/CL/CLTensor.h"
29#include "arm_compute/runtime/CL/CLTensorAllocator.h"
Georgios Pinitas7891a732021-08-20 21:39:25 +010030#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h"
31#include "src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000032#include "tests/CL/CLAccessor.h"
33#include "tests/CL/Helper.h"
34#include "tests/PaddingCalculator.h"
35#include "tests/datasets/ShapeDatasets.h"
36#include "tests/framework/Asserts.h"
37#include "tests/framework/Macros.h"
38#include "tests/framework/datasets/Datasets.h"
39#include "tests/validation/Validation.h"
40#include "tests/validation/fixtures/GEMMFixture.h"
41
42namespace arm_compute
43{
44namespace test
45{
46namespace validation
47{
48using namespace arm_compute::misc::shape_calculator;
Georgios Pinitas856f66e2021-04-22 21:13:21 +010049using namespace arm_compute::opencl::kernels;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000050
// Create function for ClGemmReshapeRhsMatrixKernel
using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<ClGemmReshapeRhsMatrixKernel>;

// Create function for ClGemmMatrixMultiplyReshapedOnlyRhsKernel
using CLGEMMMatrixMultiplyReshapedOnlyRHS = CLSynthetizeOperator<ClGemmMatrixMultiplyReshapedOnlyRhsKernel>;

// Fixture for CLGEMMMatrixMultiplyReshapedOnlyRHS (plain 2D output case)
template <typename T>
using CLGEMMMatrixMultiplyReshapedOnlyRHSFixture = GEMMMatrixMultiplyReshapedOnlyRHSValidationFixture<CLTensor, CLAccessor, T, CLGEMMReshapeRHSMatrix, CLGEMMMatrixMultiplyReshapedOnlyRHS>;

// Fixture for CLGEMMMatrixMultiplyReshapedOnlyRHS3D (variant driven by M_W/M_H datasets — see RunPrecommit3D below)
template <typename T>
using CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture = GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture<CLTensor, CLAccessor, T, CLGEMMReshapeRHSMatrix, CLGEMMMatrixMultiplyReshapedOnlyRHS>;

// Fixture for CLGEMMMatrixMultiplyReshapedOnlyRHS with post ops fused after the GEMM
template <typename T>
using CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture =
    GEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsValidationFixture<CLTensor, CLAccessor, T, CLGEMMReshapeRHSMatrix, CLGEMMMatrixMultiplyReshapedOnlyRHS>;
69
Gian Marco Iodiceadc53952019-02-15 11:10:31 +000070namespace
71{
// *INDENT-OFF*
// clang-format off
// Tolerances used when validating the CL output against the reference implementation
RelativeTolerance<float> rel_tolerance_f32(0.001f);
constexpr float          abs_tolerance_f32(0.0001f);

RelativeTolerance<float> rel_tolerance_f16(0.001f);
constexpr float          abs_tolerance_f16(0.01f);

/** Alpha values to test */
const auto a_values = framework::dataset::make("alpha", {-0.75f} );

/** Beta values to test */
const auto beta_values = framework::dataset::make("beta", {-0.35f} );

/** M values to test */
const auto m_values = framework::dataset::make("M", 37);

/** M_W values to test */
const auto m_w_values = framework::dataset::make("M_W", 5);

/** M_H values to test */
const auto m_h_values = framework::dataset::make("M_H", 7);

/** N values to test */
const auto n_values = framework::dataset::make("N", 51);

/** K values to test */
const auto k_values = framework::dataset::make("K", 23);

/** Batch size values to test */
const auto b_values = framework::dataset::make("batch_size", 2);

/** Activation values to test */
const auto act_values = framework::dataset::make("Activation",
{
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 10.f),
});

/** M0 values to test - precommit */
const auto m0_values_precommit = framework::dataset::make("M0", { 4 });

/** N0 values to test - precommit */
const auto n0_values_precommit = framework::dataset::make("N0", { 4 });

/** K0 values to test - precommit */
const auto k0_values_precommit = framework::dataset::make("K0", { 4 });

/** M0 values to test - nightly */
const auto m0_values_nightly = framework::dataset::make("M0", { 8 });

/** N0 values to test - nightly */
const auto n0_values_nightly = framework::dataset::make("N0", { 16 });

/** K0 values to test - nightly */
const auto k0_values_nightly = framework::dataset::make("K0", { 16 });

/** H0 values to test */
const auto h0_values = framework::dataset::make("H0", 1, 3);

/** Interleave values to test with RHS matrix */
const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, false });

/** Transpose values to test with RHS matrix */
const auto t_values_rhs = framework::dataset::make("transpose_rhs", { true, false });

/** Broadcast bias from vector to matrix */
const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } );

/** Boundary handling cases for testing partial/non-partial (full) block dimensions, resulting from different combinations
 * of M, M0, N and N0 values.
 * M0 and N0 are kept constant, while the different test cases need to vary M and N.
 *
 * Eg. M = 64 and N = 33 result in a block dimension that has no partial blocks (all full blocks) in Y dimension and
 * partial blocks in X dimension.
 */
const auto boundary_handling_cases = combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                    // Large k to force potential out-of-bound reads on input0
                    framework::dataset::make("K", 315),
                    // Batch size == 1 to force potential out-of-bound reads on input0
                    framework::dataset::make("batch_size", 1)),
                    framework::dataset::make("M0", 4)),
                    framework::dataset::make("N0", 4)),
                    framework::dataset::make("K0", 4)),
                    framework::dataset::make("H0", 3)),
                    i_values_rhs),
                    t_values_rhs),
                    framework::dataset::make("export_to_cl_image_rhs", {true, false})),
                    // Only need to test F32 as F16 shares identical boundary handling logics
                    framework::dataset::make("DataType", DataType::F32)),
                    framework::dataset::make("alpha", -0.75f )),
                    framework::dataset::make("beta", -0.35f )),
                    broadcast_bias_values),
                    framework::dataset::make("Activation", ActivationLayerInfo()));
SiCongLiafa19722021-10-24 19:12:33 +0100166/** Post Ops */
167using PostOpArgBroadcast = CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture<float>::PostOpArgBroadcast;
168experimental::PostOpList<PostOpArgBroadcast> post_ops_1()
169{
170 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
171 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
172 post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
173 std::make_tuple(true, true, false), // If broadcast in dims 0, 1 and 2
174 0,
175 ConvertPolicy::SATURATE);
176 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
177 return post_ops;
178}
179experimental::PostOpList<PostOpArgBroadcast> post_ops_2()
180{
181 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
182 post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
183 std::make_tuple(false, true, true), // If broadcast in dims 0, 1 and 2
184 1,
185 ConvertPolicy::SATURATE);
186 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
187 return post_ops;
188}
189experimental::PostOpList<PostOpArgBroadcast> post_ops_3()
190{
191 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
192 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
193 post_ops.push_back_op<experimental::PostOpEltwiseAdd<PostOpArgBroadcast>>(
194 std::make_tuple(false, false, true), // If broadcast in dims 0, 1 and 2
195 1,
196 ConvertPolicy::SATURATE);
197 return post_ops;
198}
ramelg016049eda2021-10-29 10:52:53 +0100199// To test that the output of the main op is the first parameter in prelu post op
200experimental::PostOpList<PostOpArgBroadcast> post_ops_4()
201{
202 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
203 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
204 post_ops.push_back_op<experimental::PostOpEltwisePRelu<PostOpArgBroadcast>>(
205 std::make_tuple(false, false, true), // If true, broadcast in corresponding dim: 0, 1 or 2
206 0,
207 ConvertPolicy::SATURATE);
208 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
209 return post_ops;
210}
211// To test that the output of the main op is the second parameter in prelu post op i.e. it is the alpha_param
212experimental::PostOpList<PostOpArgBroadcast> post_ops_5()
213{
214 experimental::PostOpList<PostOpArgBroadcast> post_ops{};
215 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LINEAR, 0.5F, 0.0F});
216 post_ops.push_back_op<experimental::PostOpEltwisePRelu<PostOpArgBroadcast>>(
217 std::make_tuple(false, false, false), // If true, broadcast in corresponding dim: 0, 1 or 2
218 1,
219 ConvertPolicy::SATURATE);
220 post_ops.push_back_op<experimental::PostOpAct<PostOpArgBroadcast>>(ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::RELU, 2.1F, 1.3F});
221 return post_ops;
222}
/** Different Post Op Lists */
// Each entry exercises a distinct op sequence / broadcast combination (see post_ops_1..post_ops_5 above)
const auto post_op_lists = framework::dataset::make("post_op_lists", {
    post_ops_1(),
    post_ops_2(),
    post_ops_3(),
    post_ops_4(),
    post_ops_5()
    } );
231
232 bool is_post_op_list_valid(unsigned int m, unsigned int n, unsigned int k, unsigned int batch, DataType data_type, const experimental::PostOpList<ITensorInfo*>& post_ops)
233{
234 const auto lhs_info = GEMMLHSMatrixInfo(4,4,1,false,true);
235 const auto rhs_info = GEMMRHSMatrixInfo(4,4,1,true,true,false);
236
237 // Create TensorInfo for post op arguments
238 TensorInfo input0_info(TensorShape(k, m, batch), 1, data_type);
239 TensorInfo input1_info(TensorShape(n, k, batch), 1, data_type);
240 TensorInfo input2_info(TensorShape(n), 1, data_type);
241 TensorInfo output_info(TensorShape(n, m, batch), 1, data_type);
242
243 const TensorInfo reshaped_input1_info = input1_info.clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(input1_info, rhs_info));
244
245 GEMMKernelInfo gemm_info(m, n, k, 0 /**< Depth of the output tensor in case is reinterpreted as 3D */,
246 false /**< reinterpret the input as 3D */,
247 true /**< Flag used to broadcast the bias addition */,
248 false /**< wider accumm */,
249 false /**< has pad y */,
250 ActivationLayerInfo::ActivationFunction::IDENTITY,
251 1 /**< Multiplication factor for the width of the 1xW transposed block */,
252 1 /**< Multiplication factor for the height of the 4x4 interleaved block */,
253 lhs_info,
254 rhs_info,
255 0 /**< Offset to be added to each element of the matrix A */,
256 0 /**< Offset to be added to each element of the matrix B */,
257 post_ops);
258 return bool(ClGemmMatrixMultiplyReshapedOnlyRhsKernel::validate(&input0_info.clone()->set_is_resizable(true),
259 &reshaped_input1_info.clone()->set_is_resizable(true),
260 &input2_info.clone()->set_is_resizable(true),
261 &output_info.clone()->set_is_resizable(true),1.f,1.f,
262 lhs_info,
263 rhs_info,
264 gemm_info));
265}
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000266/** Configuration test */
Sheri Zhang1a378102020-04-30 12:59:39 +0100267bool validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value,
268 unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value,
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100269 bool i_value_rhs, bool t_value_rhs, bool export_to_cl_image, bool broadcast_bias, bool input_as_3d, unsigned int depth_output_gemm3d, const ActivationLayerInfo &act_info,
Sheri Zhang1a378102020-04-30 12:59:39 +0100270 DataType dt_input0, DataType dt_input1, DataType dt_input2, DataType dt_output, float alpha, float beta)
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000271{
272 const unsigned int M = m_value;
273 const unsigned int N = n_value;
274 const unsigned int K = k_value;
275
276 GEMMLHSMatrixInfo lhs_info;
277 lhs_info.m0 = m0_value;
278 lhs_info.k0 = k0_value;
279
280 GEMMRHSMatrixInfo rhs_info;
281 rhs_info.n0 = n0_value;
282 rhs_info.k0 = k0_value;
283 rhs_info.h0 = h0_value;
284 rhs_info.interleave = i_value_rhs;
Gian Marco Iodiceba5e0962019-03-11 12:17:44 +0000285 rhs_info.transpose = t_value_rhs;
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100286 rhs_info.export_to_cl_image = export_to_cl_image;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000287
Gian Marco Iodice7026b302019-06-26 17:18:11 +0100288 GEMMKernelInfo kernel_info;
289 kernel_info.m = M;
290 kernel_info.n = N;
291 kernel_info.k = K;
Sheri Zhang1a378102020-04-30 12:59:39 +0100292 kernel_info.depth_output_gemm3d = depth_output_gemm3d;
293 kernel_info.reinterpret_input_as_3d = input_as_3d;
Gian Marco Iodice7026b302019-06-26 17:18:11 +0100294 kernel_info.broadcast_bias = broadcast_bias;
Gian Marco Iodiceca1f4602019-07-16 15:46:48 +0100295 kernel_info.activation_info = act_info;
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000296
297 const TensorShape lhs_shape(K, M, b_value);
298 const TensorShape rhs_shape(N, K, b_value);
Sheri Zhang1a378102020-04-30 12:59:39 +0100299 const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, dt_input1),
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000300 rhs_info);
301
Sheri Zhang1a378102020-04-30 12:59:39 +0100302 const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, dt_input0),
303 TensorInfo(rhs_shape_reshaped, 1, dt_input1),
Gian Marco Iodice7026b302019-06-26 17:18:11 +0100304 kernel_info);
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000305
Gian Marco Iodicee16c8902019-06-14 16:11:10 +0100306 const TensorShape bias_shape(N,
Sheri Zhang1a378102020-04-30 12:59:39 +0100307 M, // Correct calculation should be: broadcast_bias? 1 : M, it's wrong here on purpose just for validation test
Gian Marco Iodicee16c8902019-06-14 16:11:10 +0100308 broadcast_bias? 1 : b_value);
309
Sheri Zhang1a378102020-04-30 12:59:39 +0100310 // Create tensor info
311 TensorInfo lhs = TensorInfo(lhs_shape, 1, dt_input0);
312 TensorInfo rhs_reshaped = TensorInfo(rhs_shape_reshaped, 1, dt_input1);
313 TensorInfo bias = TensorInfo(bias_shape, 1, dt_input2);
314 TensorInfo dst = TensorInfo(dst_shape, 1, dt_output);
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000315
316 // Create and configure function
317 CLGEMMMatrixMultiplyReshapedOnlyRHS gemm;
Sheri Zhang1a378102020-04-30 12:59:39 +0100318 return bool(gemm.validate(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info));
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000319}
SiCongLiafa19722021-10-24 19:12:33 +0100320
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000321} // namespace
322
323TEST_SUITE(CL)
324TEST_SUITE(GEMMMatrixMultiplyReshapedOnlyRHS)
Sheri Zhang1a378102020-04-30 12:59:39 +0100325
326/** Validate tests
327 *
328 * A series of validation tests on configurations which according to the API specification
329 * the function should fail against.
330 *
331 * Checks performed in order:
332 * - Mismachting data type: input1, input2 and output need to have same data type as input0. Support data type: F32/F16.
333 * - Unsupported M0: MO can only be 1,2,3,4,5,6,7,8
334 * - Unsupported N0: NO can only be 2,3,4,8,16
335 * - Unsupported K0: KO can only be 2,3,4,8,16
336 * - Unsupported bias addition: bias broadcast mode is 0 if the input or output has to be reinterpreted as 3D
337 * - Incorrect bias diemension when bias broadcast mode is 1 and beta is not 0.0f, should be (n, 1), not (n, m)
338 * - Incorrect input0 dimension when input is reinterpreted as 3D: input0->dimension(1) * input0->dimension(2) != m
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100339 * - Correct support for creating an OpenCL image object from buffer
340 * - Incorrect support for creating an OpenCL image object from buffer. N0 is 2 but it can only be 4,8 and 16
SiCong Li5bdde852020-08-26 13:55:15 +0100341 * - Correct F16 support for creating an OpenCL image object from buffer.
Sheri Zhang1a378102020-04-30 12:59:39 +0100342 */
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100343DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(
344framework::dataset::make("batch_size", { 1, 1, 1, 1, 1, 1, 2, 1, 1, 1 }),
345framework::dataset::make("M0", { 4, 9, 4, 4, 4, 4, 4, 4, 4, 4 })),
346framework::dataset::make("N0", { 4, 4, 18, 4, 4, 4, 4, 8, 2, 8 })),
347framework::dataset::make("K0", { 4, 4, 4, 1, 4, 4, 4, 4, 4, 4 })),
348framework::dataset::make("broadcast_bias", { false, false, false, false, false, true, true, false, false, false })),
349framework::dataset::make("input_as_3d", { 0, 0, 0, 0, 1, 0, 1, 0, 0, 0 })),
350framework::dataset::make("depth_output_gemm3d", { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 })),
351framework::dataset::make("export_to_cl_image", { false, false, false, false, false, false, false, true, true, true })),
352framework::dataset::make("data_type_input0", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
353framework::dataset::make("data_type_input1", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
354framework::dataset::make("data_type_input2", { DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
355framework::dataset::make("data_type_output", { DataType::F16, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F32, DataType::F16})),
356framework::dataset::make("Beta", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f , 1.0f})),
SiCong Li5bdde852020-08-26 13:55:15 +0100357framework::dataset::make("Expected", { false, false, false, false, false, false, false, true, false, true })),
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100358b_value, m0_value, n0_value, k0_value, broadcast_bias, input_as_3d, depth_output_gemm3d, export_to_cl_image, dt_input0, dt_intpu1, dt_input2, dt_output, beta, expected)
Sheri Zhang1a378102020-04-30 12:59:39 +0100359{
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100360 bool expected_value = expected;
361
362 // Change expected to false if the target platform does not support the OpenCL cl_khr_image2d_from_buffer extension
363 if(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()) && export_to_cl_image)
364 {
365 expected_value = false;
366 }
367
368 bool status = validate_configuration(37, 51, 23, b_value, m0_value, n0_value, k0_value, 1, false, false, export_to_cl_image, broadcast_bias, input_as_3d, depth_output_gemm3d, ActivationLayerInfo(), dt_input0, dt_intpu1, dt_input2, dt_output, 1.0f, beta);
369 ARM_COMPUTE_EXPECT(status == expected_value, framework::LogLevel::ERRORS);
Sheri Zhang1a378102020-04-30 12:59:39 +0100370}
371
SiCongLiafa19722021-10-24 19:12:33 +0100372TEST_SUITE(ValidateFusedPostOpsConfigs)
373TEST_SUITE(Invalid)
374TEST_CASE(UnsupportedPostOpSequence, framework::DatasetMode::ALL)
375{
376 const auto data_type = DataType::F32;
377 const unsigned int m = 17;
378 const unsigned int n = 1;
379 const unsigned int k = 13;
380 const unsigned int batch = 2;
381 TensorShape post_op_arg0_shape(n, m, batch);
382 TensorInfo post_op_arg_info(post_op_arg0_shape, 1, data_type);
383 auto post_op_arg1_info = post_op_arg_info.clone();
384
385 // Unsupported sequence of post ops
386 experimental::PostOpList<ITensorInfo*> post_ops{};
387 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>(
388 &post_op_arg_info,
389 1,
390 ConvertPolicy::SATURATE);
391 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>(
392 post_op_arg1_info.get(),
393 0,
394 ConvertPolicy::SATURATE);
395
396 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
397}
398TEST_CASE(OutputWidened, framework::DatasetMode::ALL)
399{
400 // Invalid broadcast: post op tensors "widen" the output tensor
401 const auto data_type = DataType::F32;
402 const unsigned int m = 17;
403 const unsigned int n = 1;
404 const unsigned int k = 1;
405 const unsigned int batch = 1;
406 TensorShape post_op_arg_shape(n, m, batch + 4); // output's batch dimension is "widened", which is not allowed
407 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
408 experimental::PostOpList<ITensorInfo*> post_ops{};
409 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
410
411 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
412}
413TEST_CASE(BroadcastInXDimOnly, framework::DatasetMode::ALL)
414{
415 // Invalid broadcast: post op tensors broadcast in the first dimension (X) only
416 const auto data_type = DataType::F32;
417 const unsigned int m = 22;
418 const unsigned int n = 16;
419 const unsigned int k = 15;
420 const unsigned int batch = 3;
421 TensorShape post_op_arg_shape(1, m, batch);
422 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
423 experimental::PostOpList<ITensorInfo*> post_ops{};
424 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
425
426 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == false, framework::LogLevel::ERRORS);
427}
428TEST_SUITE_END() // Invalid
429TEST_SUITE(Valid)
430TEST_CASE(EmptyPostOpList, framework::DatasetMode::ALL)
431{
432 const auto data_type = DataType::F32;
433 const unsigned int m = 22;
434 const unsigned int n = 16;
435 const unsigned int k = 15;
436 const unsigned int batch = 3;
437 experimental::PostOpList<ITensorInfo*> post_ops{};
438
439 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
440}
441TEST_CASE(BroadcastInYDimOnly, framework::DatasetMode::ALL)
442{
443 const auto data_type = DataType::F32;
444 const unsigned int m = 22;
445 const unsigned int n = 16;
446 const unsigned int k = 15;
447 const unsigned int batch = 3;
448 TensorShape post_op_arg_shape(n, 1, batch);
449 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
450 experimental::PostOpList<ITensorInfo*> post_ops{};
451 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
452
453 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
454}
455TEST_CASE(BroadcastInBothXandYDims, framework::DatasetMode::ALL)
456{
457 const auto data_type = DataType::F32;
458 const unsigned int m = 22;
459 const unsigned int n = 16;
460 const unsigned int k = 15;
461 const unsigned int batch = 3;
462 TensorShape post_op_arg_shape(1, 1, batch);
463 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
464 experimental::PostOpList<ITensorInfo*> post_ops{};
465 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
466
467 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
468}
469TEST_CASE(BroadcastInAllDims, framework::DatasetMode::ALL)
470{
471 const auto data_type = DataType::F32;
472 const unsigned int m = 22;
473 const unsigned int n = 16;
474 const unsigned int k = 15;
475 const unsigned int batch = 3;
476 TensorShape post_op_arg_shape(1, 1, 1);
477 TensorInfo post_op_arg_info(post_op_arg_shape, 1, data_type);
478 experimental::PostOpList<ITensorInfo*> post_ops{};
479 post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo*>>( &post_op_arg_info, 0, ConvertPolicy::SATURATE);
480
481 ARM_COMPUTE_EXPECT(is_post_op_list_valid(m, n, k, batch, data_type, post_ops) == true, framework::LogLevel::ERRORS);
482}
483TEST_SUITE_END() // Valid
TEST_SUITE_END() // ValidateFusedPostOpsConfigs
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000485TEST_SUITE(Float)
486TEST_SUITE(FP32)
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000487
SiCong Lib972ae62020-08-03 15:39:45 +0100488FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingPartialInXPartialInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
489 combine(combine(
490 framework::dataset::make("M", 3),
491 framework::dataset::make("N", 1)),
492 boundary_handling_cases))
493{
494 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000495 if(validate_result)
496 {
497 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
498 }
499 else
500 {
501 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
502 framework::ARM_COMPUTE_PRINT_INFO();
503 }
SiCong Lib972ae62020-08-03 15:39:45 +0100504}
505
506FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingPartialInXFullInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
507 combine(combine(
508 framework::dataset::make("M", 64),
509 framework::dataset::make("N", 43)),
510 boundary_handling_cases))
511{
512 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000513 if(validate_result)
514 {
515 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
516 }
517 else
518 {
519 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
520 framework::ARM_COMPUTE_PRINT_INFO();
521 }
SiCong Lib972ae62020-08-03 15:39:45 +0100522}
523
524FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingFullInXFullInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
525 combine(combine(
526 framework::dataset::make("M", 64),
527 framework::dataset::make("N", 32)),
528 boundary_handling_cases))
529{
530 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000531 if(validate_result)
532 {
533 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
534 }
535 else
536 {
537 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
538 framework::ARM_COMPUTE_PRINT_INFO();
539 }
SiCong Lib972ae62020-08-03 15:39:45 +0100540}
541
542FIXTURE_DATA_TEST_CASE(RunPrecommitBoundaryHandlingFullInXPartialInY, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
543 combine(combine(
544 framework::dataset::make("M", 37),
545 framework::dataset::make("N", 32)),
546 boundary_handling_cases))
547{
548 // Validate output
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000549 if(validate_result)
550 {
551 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
552 }
553 else
554 {
555 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
556 framework::ARM_COMPUTE_PRINT_INFO();
557 }
SiCong Lib972ae62020-08-03 15:39:45 +0100558}
559
// Precommit sweep over the small (M0=N0=K0=4) block configurations; dataset order must match the fixture's setup signature
FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::PRECOMMIT,
                combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                                                   m_values,
                                                                   n_values),
                                                                   k_values),
                                                                   b_values),
                                                                   m0_values_precommit),
                                                                   n0_values_precommit),
                                                                   k0_values_precommit),
                                                                   h0_values),
                                                                   i_values_rhs),
                                                                   t_values_rhs),
                                                                   framework::dataset::make("export_to_cl_image_rhs", {false, true})),
                                                                   framework::dataset::make("DataType", DataType::F32)),
                                                                   a_values),
                                                                   beta_values),
                                                                   broadcast_bias_values),
                                                                   act_values))
{
    // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
    if(validate_result)
    {
        validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
    }
    else
    {
        ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
        framework::ARM_COMPUTE_PRINT_INFO();
    }
}
590
591FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<float>, framework::DatasetMode::NIGHTLY,
592 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
593 m_values,
594 n_values),
595 k_values),
596 b_values),
597 m0_values_nightly),
598 n0_values_nightly),
599 k0_values_nightly),
600 h0_values),
601 i_values_rhs),
602 t_values_rhs),
Manuel Bottini827817e2020-11-19 12:12:06 +0000603 framework::dataset::make("export_to_cl_image_rhs", {false, true})),
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100604 framework::dataset::make("DataType", DataType::F32)),
605 a_values),
606 beta_values),
607 broadcast_bias_values),
608 act_values))
609{
610 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000611 if(validate_result)
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100612 {
613 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
614 }
615 else
616 {
617 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
618 framework::ARM_COMPUTE_PRINT_INFO();
619 }
620}
621
622FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::PRECOMMIT,
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100623 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100624 m_w_values,
625 m_h_values),
626 n_values),
627 k_values),
628 b_values),
629 m0_values_precommit),
630 n0_values_precommit),
631 k0_values_precommit),
632 h0_values),
633 i_values_rhs),
634 t_values_rhs),
Manuel Bottini827817e2020-11-19 12:12:06 +0000635 framework::dataset::make("export_to_cl_image_rhs", {false, true})),
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100636 framework::dataset::make("has_pad_y", {false, true})),
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100637 framework::dataset::make("DataType", DataType::F32)),
638 a_values),
639 beta_values),
640 act_values))
641{
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000642 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
643 if(validate_result)
644 {
645 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
646 }
647 else
648 {
649 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
650 framework::ARM_COMPUTE_PRINT_INFO();
651 }
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100652}
653
654FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<float>, framework::DatasetMode::NIGHTLY,
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100655 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100656 m_w_values,
657 m_h_values),
658 n_values),
659 k_values),
660 b_values),
661 m0_values_nightly),
662 n0_values_nightly),
663 k0_values_nightly),
664 h0_values),
665 i_values_rhs),
666 t_values_rhs),
Manuel Bottini827817e2020-11-19 12:12:06 +0000667 framework::dataset::make("export_to_cl_image_rhs", {false, true})),
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100668 framework::dataset::make("has_pad_y", {false, true})),
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100669 framework::dataset::make("DataType", DataType::F32)),
670 a_values),
671 beta_values),
672 act_values))
673{
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000674 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
675 if(validate_result)
676 {
677 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
678 }
679 else
680 {
681 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
682 framework::ARM_COMPUTE_PRINT_INFO();
683 }
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100684}
SiCongLiafa19722021-10-24 19:12:33 +0100685
686TEST_SUITE(FusedPostOps)
687
688FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture<float>, framework::DatasetMode::ALL,
689 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
690 m_values,
691 n_values),
692 k_values),
693 b_values),
694 m0_values_precommit),
695 n0_values_precommit),
696 k0_values_precommit),
697 framework::dataset::make("H0", {1})),
698 framework::dataset::make("interleave_rhs", { true })),
699 t_values_rhs),
SiCongLibc788382021-11-02 14:52:02 +0000700 framework::dataset::make("export_to_cl_image_rhs", {false, true})),
SiCongLiafa19722021-10-24 19:12:33 +0100701 framework::dataset::make("DataType", DataType::F32)),
702 a_values),
703 beta_values),
704 framework::dataset::make("broadcast_bias", { false } )),
705 act_values),
706 post_op_lists)
707 )
708{
709 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
710 if(validate_result)
711 {
712 validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
713 }
714 else
715 {
716 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
717 framework::ARM_COMPUTE_PRINT_INFO();
718 }
719}
720
721TEST_SUITE_END() // FusedPostOps
722
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000723TEST_SUITE_END() // FP32
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100724
725TEST_SUITE(FP16)
726FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::PRECOMMIT,
727 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
728 m_values,
729 n_values),
730 k_values),
731 b_values),
732 m0_values_precommit),
733 n0_values_precommit),
734 k0_values_precommit),
735 h0_values),
736 i_values_rhs),
737 t_values_rhs),
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100738 framework::dataset::make("export_to_cl_image_rhs", true)),
739 framework::dataset::make("DataType", DataType::F16)),
740 a_values),
741 beta_values),
742 broadcast_bias_values),
743 act_values))
744{
745 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000746 if(validate_result)
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100747 {
748 validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
749 }
750 else
751 {
752 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
753 framework::ARM_COMPUTE_PRINT_INFO();
754 }
755}
756
757FIXTURE_DATA_TEST_CASE(RunNightly, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture<half>, framework::DatasetMode::NIGHTLY,
758 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
759 m_values,
760 n_values),
761 k_values),
762 b_values),
763 m0_values_nightly),
764 n0_values_nightly),
765 k0_values_nightly),
766 h0_values),
767 i_values_rhs),
768 t_values_rhs),
769 framework::dataset::make("export_to_cl_image_rhs", true)),
770 framework::dataset::make("DataType", DataType::F16)),
771 a_values),
772 beta_values),
773 broadcast_bias_values),
774 act_values))
775{
776 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000777 if(validate_result)
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100778 {
779 validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
780 }
781 else
782 {
783 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
784 framework::ARM_COMPUTE_PRINT_INFO();
785 }
786}
787
788FIXTURE_DATA_TEST_CASE(RunPrecommit3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::PRECOMMIT,
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100789 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100790 m_w_values,
791 m_h_values),
792 n_values),
793 k_values),
794 b_values),
795 m0_values_precommit),
796 n0_values_precommit),
797 k0_values_precommit),
798 h0_values),
799 i_values_rhs),
800 t_values_rhs),
801 framework::dataset::make("export_to_cl_image_rhs", true)),
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100802 framework::dataset::make("has_pad_y", {false, true})),
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100803 framework::dataset::make("DataType", DataType::F16)),
804 a_values),
805 beta_values),
806 act_values))
807{
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000808 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
809 if(validate_result)
810 {
811 validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
812 }
813 else
814 {
815 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
816 framework::ARM_COMPUTE_PRINT_INFO();
817 }
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100818}
819
820FIXTURE_DATA_TEST_CASE(RunNightly3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture<half>, framework::DatasetMode::NIGHTLY,
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100821 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100822 m_w_values,
823 m_h_values),
824 n_values),
825 k_values),
826 b_values),
827 m0_values_nightly),
828 n0_values_nightly),
829 k0_values_nightly),
830 h0_values),
831 i_values_rhs),
832 t_values_rhs),
833 framework::dataset::make("export_to_cl_image_rhs", true)),
Gian Marco Iodice9ae06d42020-10-22 16:37:12 +0100834 framework::dataset::make("has_pad_y", {false, true})),
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100835 framework::dataset::make("DataType", DataType::F16)),
836 a_values),
837 beta_values),
838 act_values))
839{
Sheri Zhangcc3e53c2020-11-16 21:17:28 +0000840 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
841 if(validate_result)
842 {
843 validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
844 }
845 else
846 {
847 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
848 framework::ARM_COMPUTE_PRINT_INFO();
849 }
Gian Marco Iodice6f931342020-09-15 14:17:41 +0100850}
SiCongLiafa19722021-10-24 19:12:33 +0100851TEST_SUITE(FusedPostOps)
852
853FIXTURE_DATA_TEST_CASE(RunPrecommit, CLGEMMMatrixMultiplyReshapedOnlyRHSWithPostOpsFixture<half>, framework::DatasetMode::ALL,
854 combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
855 m_values,
856 n_values),
857 k_values),
858 b_values),
859 m0_values_precommit),
860 n0_values_precommit),
861 k0_values_precommit),
862 framework::dataset::make("H0", {1})),
863 framework::dataset::make("interleave_rhs", { true })),
864 t_values_rhs),
865 framework::dataset::make("export_to_cl_image_rhs", true)),
866 framework::dataset::make("DataType", DataType::F16)),
867 a_values),
868 beta_values),
869 framework::dataset::make("broadcast_bias", { false } )),
870 act_values),
871 post_op_lists)
872 )
873{
874 // Validate output only if the target platform supports the OpenCL cl_khr_image2d_from_buffer extension
875 if(validate_result)
876 {
877 validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.f, abs_tolerance_f16);
878 }
879 else
880 {
881 ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped");
882 framework::ARM_COMPUTE_PRINT_INFO();
883 }
884}
885
886TEST_SUITE_END() // FusedPostOps
887
Gian Marco Iodice781cba72020-06-19 16:56:57 +0100888TEST_SUITE_END() // FP16
889
Gian Marco Iodiceadc53952019-02-15 11:10:31 +0000890TEST_SUITE_END() // Float
TEST_SUITE_END() // GEMMMatrixMultiplyReshapedOnlyRHS
892TEST_SUITE_END() // CL
893} // namespace validation
894} // namespace test
Georgios Pinitasb0f342e2019-05-21 13:32:43 +0100895} // namespace arm_compute