/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/ActivationFunctionUtils.h"
#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/StringUtils.h"

#include "src/common/utils/Log.h"
#include "src/core/CL/CLUtils.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"
#include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
{
    const bool adj_lhs = matmul_kernel_info.adj_lhs;
    const bool adj_rhs = matmul_kernel_info.adj_rhs;
    const int  m0      = matmul_kernel_info.m0;
    const int  n0      = matmul_kernel_info.n0;
    const int  k0      = matmul_kernel_info.k0;

    // Validate M0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0");

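    // Note: (x & (x - 1)) == 0 only holds for powers of two, so the block-size checks
    // below accept powers of two up to 16, plus the special value 3.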
    if (adj_lhs)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16),
                                        "Only 1,2,3,4,8,16 are supported for M0 for Lhs transposed");
    }

    // Validate N0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16),
                                    "Only 1,2,3,4,8,16 are supported for N0");

    // Validate K0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0");
    if (!adj_lhs || adj_rhs)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16),
                                        "Only 1,2,3,4,8,16 are supported for K0");
    }

    return Status{};
}

Status validate_export_to_cl_image(const ITensorInfo *rhs, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON(matmul_kernel_info.export_rhs_to_cl_image && rhs->lock_paddings());
    if (matmul_kernel_info.export_rhs_to_cl_image)
    {
        if (matmul_kernel_info.adj_rhs)
        {
            const int k0 = matmul_kernel_info.k0;
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 != 4 && k0 != 8 && k0 != 16,
                                            "K0 can only be: 4, 8, and 16 for Rhs transposed");
        }
        else
        {
            const int n0 = matmul_kernel_info.n0;
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 != 4 && n0 != 8 && n0 != 16,
                                            "N0 can only be: 4, 8, and 16 for Rhs non-transposed");
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(!export_to_cl_image(rhs),
                                        "Export to CLImage is not supported for this device/configuration");
    }

    return Status{};
}
} // namespace
ClMatMulNativeKernel::ClMatMulNativeKernel()
{
    _type = CLKernelType::GEMM;
}

Status ClMatMulNativeKernel::validate(const ITensorInfo         *lhs,
                                      const ITensorInfo         *rhs,
                                      const ITensorInfo         *bias,
                                      const ITensorInfo         *dst,
                                      const MatMulKernelInfo    &matmul_kernel_info,
                                      const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_UNUSED(act_info);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
    ARM_COMPUTE_RETURN_ON_ERROR(
        validate_matmul_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_export_to_cl_image(rhs, matmul_kernel_info));

    const TensorShape expected_output_shape =
        misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info);

    if (dst->total_size() != 0)
    {
        const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst);
    }

    if (bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, lhs);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias->num_dimensions() > 1), "Multi dimensional bias is unsupported.");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != expected_output_shape[0],
                                        "First dimension of bias and output tensors must match.");
    }

    return Status{};
}
void ClMatMulNativeKernel::configure(const ClCompileContext    &compile_context,
                                     ITensorInfo               *lhs,
                                     ITensorInfo               *rhs,
                                     ITensorInfo               *bias,
                                     ITensorInfo               *dst,
                                     const MatMulKernelInfo    &matmul_kernel_info,
                                     const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst, &compile_context, &matmul_kernel_info);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst, matmul_kernel_info);
    ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, bias, dst, matmul_kernel_info));

    // dst tensor auto initialization if not yet initialized
    auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(
                                 lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));

    const int  m       = dst->dimension(1);
    const int  n       = dst->dimension(0);
    const int  k       = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();
    const bool adj_lhs = matmul_kernel_info.adj_lhs;

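    // N0 (and M0 when the LHS is transposed) is rounded to a supported vector size,
    // while a non-transposed M0 only needs to be capped at M.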
    int m0 = adj_lhs ? adjust_vec_size(matmul_kernel_info.m0, m) : std::min(matmul_kernel_info.m0, m);
    int n0 = adjust_vec_size(matmul_kernel_info.n0, n);

    _export_rhs_to_cl_image = matmul_kernel_info.export_rhs_to_cl_image && !rhs->lock_paddings();

    // Configure kernel window
    Window win = calculate_max_window(*dst, Steps(n0, m0));
    win        = win.collapse(win, Window::DimZ);
    IClKernel::configure_internal(win);

    // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
    const unsigned int partial_store_m0 = m % m0;
    const unsigned int partial_store_n0 = n % n0;

    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type()));
    build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
    build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
    build_opts.add_option("-DK0=" + support::cpp11::to_string(matmul_kernel_info.k0));
    build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
    build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
    build_opts.add_option("-DK=" + support::cpp11::to_string(k));
    build_opts.add_option_if(bias != nullptr, "-DBIAS");
    build_opts.add_option_if_else(_export_rhs_to_cl_image, "-DRHS_TENSOR_TYPE=IMAGE", "-DRHS_TENSOR_TYPE=BUFFER");

    // Define values for activation function
    build_opts.add_option(("-DA_VAL=" + float_to_string_with_full_precision(act_info.a())));
    build_opts.add_option(("-DB_VAL=" + float_to_string_with_full_precision(act_info.b())));
    build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));

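    // The kernel name encodes the transposition of each operand, e.g. mat_mul_native_nt_t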
    std::string kernel_name("mat_mul_native");
    kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
    kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));

    if (_export_rhs_to_cl_image)
    {
        gemm::update_padding_for_cl_image(rhs);
    }

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name;
    _config_id += "_";
    _config_id += lower_string(string_from_data_type(lhs->data_type()));
    _config_id += "_";
    _config_id += support::cpp11::to_string(m);
    _config_id += "_";
    _config_id += support::cpp11::to_string(n);
    _config_id += "_";
    _config_id += support::cpp11::to_string(k);
    _config_id += "_";
    _config_id += support::cpp11::to_string(dst->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(_export_rhs_to_cl_image);
    _config_id += "_";
    _config_id += support::cpp11::to_string(m0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(n0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(matmul_kernel_info.k0);
}

void ClMatMulNativeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const ICLTensor *lhs =
        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const ICLTensor *rhs =
        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    const ICLTensor *bias = utils::cast::polymorphic_downcast<const ICLTensor *>(
        tensors.get_const_tensor(TensorType::ACL_SRC_2)); // nullptr if bias is not present
    ICLTensor *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst);

    unsigned int idx              = 0;
    Window       window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ);

    add_3d_tensor_nhw_argument(idx, lhs);

    cl::Image2D rhs_cl_image;
    if (_export_rhs_to_cl_image)
    {
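        // Each cl_image texel packs four elements of the innermost dimension, hence image_w = dim(0) / 4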
        const size_t      image_w = rhs->info()->dimension(0) / 4;
        const size_t      image_h = rhs->info()->tensor_shape().total_size() / rhs->info()->dimension(0);
        const TensorShape shape2d(image_w, image_h);
        const size_t      image_row_pitch = rhs->info()->strides_in_bytes()[1];

        // Export cl_buffer to cl_image
        rhs_cl_image = create_image2d_from_buffer(CLKernelLibrary::get().context(), rhs->cl_buffer(), shape2d,
                                                  rhs->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
        _kernel.setArg(idx++, rhs_cl_image);
    }

    add_3d_tensor_nhw_argument(idx, rhs);
    if (bias != nullptr)
    {
        add_3d_tensor_nhw_argument(idx, bias);
    }
    add_3d_tensor_nhw_argument(idx, dst);

    enqueue(queue, *this, window_collapsed, lws_hint());
}

} // namespace kernels
} // namespace opencl
} // namespace arm_compute