blob: 8f8ccfc41f39ca5858278f50fae8245e6ac2c4c3 [file] [log] [blame]
/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
Jakub Sujak1ed6a142023-04-13 21:14:42 +010024#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h"
Gunes Bayirbbeef722023-03-20 10:19:10 +000025
Matthew Bentham314d3e22023-06-23 10:53:52 +000026#include "arm_compute/core/utils/ActivationFunctionUtils.h"
Gunes Bayirbbeef722023-03-20 10:19:10 +000027#include "arm_compute/core/CL/CLHelpers.h"
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000028#include "arm_compute/core/CL/ICLTensor.h"
Gunes Bayirbbeef722023-03-20 10:19:10 +000029#include "arm_compute/core/ITensorPack.h"
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000030#include "arm_compute/core/TensorInfo.h"
Matthew Bentham314d3e22023-06-23 10:53:52 +000031#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000032#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Matthew Bentham314d3e22023-06-23 10:53:52 +000033#include "arm_compute/core/utils/StringUtils.h"
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000034
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000035#include "src/common/utils/Log.h"
Gunes Bayirbbeef722023-03-20 10:19:10 +000036#include "src/core/CL/CLUtils.h"
37#include "src/core/helpers/AutoConfiguration.h"
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000038#include "src/core/helpers/WindowHelpers.h"
Gunes Bayirbbeef722023-03-20 10:19:10 +000039#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"
40
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000041#include "support/Cast.h"
Gunes Bayirbbeef722023-03-20 10:19:10 +000042#include "support/StringSupport.h"
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000043
44namespace arm_compute
45{
46namespace opencl
47{
48namespace kernels
49{
50namespace
51{
52Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
53{
54 const bool adj_lhs = matmul_kernel_info.adj_lhs;
55 const bool adj_rhs = matmul_kernel_info.adj_rhs;
56 const int m0 = matmul_kernel_info.m0;
57 const int n0 = matmul_kernel_info.n0;
58 const int k0 = matmul_kernel_info.k0;
59
60 // Validate M0
Gunes Bayir8918b232023-03-17 13:52:21 +000061 ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0");
62
63 if(adj_lhs)
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000064 {
Gunes Bayirbbeef722023-03-20 10:19:10 +000065 ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16), "Only 1,2,3,4,8,16 are supported for M0 for Lhs transposed");
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000066 }
67
68 // Validate N0
Gunes Bayir8918b232023-03-17 13:52:21 +000069 ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0");
70 ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0");
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000071
72 // Validate K0
Gunes Bayir8918b232023-03-17 13:52:21 +000073 ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0");
74 if(!adj_lhs || adj_rhs)
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000075 {
Gunes Bayir8918b232023-03-17 13:52:21 +000076 ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0");
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000077 }
Gunes Bayir8918b232023-03-17 13:52:21 +000078
79 return Status{};
80}
81
82Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
83{
84 const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
85 const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();
86
87 ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
88 ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
89 ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");
90
91 constexpr size_t batch_dim_start = 2;
92 for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000093 {
Gunes Bayir8918b232023-03-17 13:52:21 +000094 ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000095 }
96
97 return Status{};
98}
Gunes Bayirbbeef722023-03-20 10:19:10 +000099
100Status validate_export_to_cl_image(const ITensorInfo *rhs, const MatMulKernelInfo &matmul_kernel_info)
101{
102 ARM_COMPUTE_RETURN_ERROR_ON(matmul_kernel_info.export_rhs_to_cl_image && rhs->lock_paddings());
103 if(matmul_kernel_info.export_rhs_to_cl_image)
104 {
105 if(matmul_kernel_info.adj_rhs)
106 {
107 const int k0 = matmul_kernel_info.k0;
108 ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 != 4 && k0 != 8 && k0 != 16, "K0 can only be: 4, 8, and 16 for Rhs transposed");
109 }
110 else
111 {
112 const int n0 = matmul_kernel_info.n0;
113 ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 != 4 && n0 != 8 && n0 != 16, "N0 can only be: 4, 8, and 16 for Rhs non-transposed");
114 }
115 ARM_COMPUTE_RETURN_ERROR_ON_MSG(!export_to_cl_image(rhs), "Export to CLImage is not supported for this device/configuration");
116 }
117
Mohammed Suhail Munshi94abde42023-05-25 16:48:43 +0100118 return Status{};
Gunes Bayirbbeef722023-03-20 10:19:10 +0000119}
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000120}
/** Default constructor: tags this kernel as a GEMM-type CL kernel. */
ClMatMulNativeKernel::ClMatMulNativeKernel()
{
    // Kernel type is used by the CL runtime (e.g. tuning/scheduling) to classify this kernel.
    _type = CLKernelType::GEMM;
}
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100125
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100126Status ClMatMulNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info,
127 const ActivationLayerInfo &act_info)
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000128{
Mohammed Suhail Munshi94abde42023-05-25 16:48:43 +0100129 ARM_COMPUTE_UNUSED(act_info);
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100130 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst);
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000131 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
132 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
133 ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
Gunes Bayir8918b232023-03-17 13:52:21 +0000134 ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
Gunes Bayirbbeef722023-03-20 10:19:10 +0000135 ARM_COMPUTE_RETURN_ON_ERROR(validate_export_to_cl_image(rhs, matmul_kernel_info));
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000136
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100137 const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info);
138
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100139 if(dst->total_size() != 0)
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000140 {
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100141 const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_output_shape);
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100142 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
143 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst);
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000144 }
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000145
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100146 if(bias != nullptr)
147 {
148 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, lhs);
149 ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias->num_dimensions() > 1), "Multi dimensional bias is unsupported.");
150 ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != expected_output_shape[0], "First dimension of bias and output tensors must match.");
151 }
152
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000153 return Status{};
154}
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100155void ClMatMulNativeKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info,
Mohammed Suhail Munshi94abde42023-05-25 16:48:43 +0100156 const ActivationLayerInfo &act_info)
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000157{
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100158 ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst, &compile_context, &matmul_kernel_info);
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100159 ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst, matmul_kernel_info);
160 ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, bias, dst, matmul_kernel_info));
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000161
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100162 // dst tensor auto initialization if not yet initialized
163 auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000164
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100165 const int m = dst->dimension(1);
166 const int n = dst->dimension(0);
Gunes Bayir8918b232023-03-17 13:52:21 +0000167 const int k = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();
168 const bool adj_lhs = matmul_kernel_info.adj_lhs;
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000169
Gunes Bayir8918b232023-03-17 13:52:21 +0000170 int m0 = adj_lhs ? adjust_vec_size(matmul_kernel_info.m0, m) : std::min(matmul_kernel_info.m0, m);
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000171 int n0 = adjust_vec_size(matmul_kernel_info.n0, n);
172
Gunes Bayirbbeef722023-03-20 10:19:10 +0000173 _export_rhs_to_cl_image = matmul_kernel_info.export_rhs_to_cl_image && !rhs->lock_paddings();
174
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000175 // Configure kernel window
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100176 Window win = calculate_max_window(*dst, Steps(n0, m0));
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000177 win = win.collapse(win, Window::DimZ);
178 IClKernel::configure_internal(win);
179
180 // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
Gunes Bayirbbeef722023-03-20 10:19:10 +0000181 const unsigned int partial_store_m0 = m % m0;
182 const unsigned int partial_store_n0 = n % n0;
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000183
184 CLBuildOptions build_opts;
185 build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type()));
186 build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
187 build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
188 build_opts.add_option("-DK0=" + support::cpp11::to_string(matmul_kernel_info.k0));
189 build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
190 build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
191 build_opts.add_option("-DK=" + support::cpp11::to_string(k));
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100192 build_opts.add_option_if(bias != nullptr, "-DBIAS");
Gunes Bayirbbeef722023-03-20 10:19:10 +0000193 build_opts.add_option_if_else(_export_rhs_to_cl_image, "-DRHS_TENSOR_TYPE=IMAGE", "-DRHS_TENSOR_TYPE=BUFFER");
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000194
Mohammed Suhail Munshi94abde42023-05-25 16:48:43 +0100195 // Define values for activation function
196 build_opts.add_option(("-DA_VAL=" + float_to_string_with_full_precision(act_info.a())));
197 build_opts.add_option(("-DB_VAL=" + float_to_string_with_full_precision(act_info.b())));
198 build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
199
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000200 std::string kernel_name("mat_mul_native");
201 kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
202 kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";
203
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000204 // A macro guard to compile ONLY the kernel of interest
205 build_opts.add_option("-D" + upper_string(kernel_name));
206
Gunes Bayirbbeef722023-03-20 10:19:10 +0000207 if(_export_rhs_to_cl_image)
208 {
209 gemm::update_padding_for_cl_image(rhs);
210 }
211
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000212 // Create kernel
213 _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
214
215 // Set config_id for enabling LWS tuning
216 _config_id = kernel_name;
217 _config_id += "_";
218 _config_id += lower_string(string_from_data_type(lhs->data_type()));
219 _config_id += "_";
Gunes Bayirbbeef722023-03-20 10:19:10 +0000220 _config_id += support::cpp11::to_string(m);
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000221 _config_id += "_";
Gunes Bayirbbeef722023-03-20 10:19:10 +0000222 _config_id += support::cpp11::to_string(n);
223 _config_id += "_";
224 _config_id += support::cpp11::to_string(k);
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000225 _config_id += "_";
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100226 _config_id += support::cpp11::to_string(dst->dimension(2));
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000227 _config_id += "_";
Gunes Bayirbbeef722023-03-20 10:19:10 +0000228 _config_id += support::cpp11::to_string(_export_rhs_to_cl_image);
229 _config_id += "_";
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000230 _config_id += support::cpp11::to_string(m0);
231 _config_id += "_";
232 _config_id += support::cpp11::to_string(n0);
233 _config_id += "_";
234 _config_id += support::cpp11::to_string(matmul_kernel_info.k0);
235}
236
Jakub Sujak1ed6a142023-04-13 21:14:42 +0100237void ClMatMulNativeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000238{
239 ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
240 ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
241
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100242 const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
243 const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
244 const ICLTensor *bias = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2)); // nullptr if bias is not present
245 ICLTensor *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100246 ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100247 ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst);
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000248
249 unsigned int idx = 0;
250 Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ);
251
252 add_3d_tensor_nhw_argument(idx, lhs);
Gunes Bayirbbeef722023-03-20 10:19:10 +0000253
254 cl::Image2D rhs_cl_image;
255 if(_export_rhs_to_cl_image)
256 {
257 const size_t image_w = rhs->info()->dimension(0) / 4;
258 const size_t image_h = rhs->info()->tensor_shape().total_size() / rhs->info()->dimension(0);
259 const TensorShape shape2d(image_w, image_h);
260 const size_t image_row_pitch = rhs->info()->strides_in_bytes()[1];
261
262 // Export cl_buffer to cl_image
263 rhs_cl_image = create_image2d_from_buffer(CLKernelLibrary::get().context(), rhs->cl_buffer(), shape2d, rhs->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
264 _kernel.setArg(idx++, rhs_cl_image);
265 }
266
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000267 add_3d_tensor_nhw_argument(idx, rhs);
Mohammed Suhail Munshi8e2dede2023-06-27 14:25:58 +0100268 if(bias != nullptr)
269 {
270 add_3d_tensor_nhw_argument(idx, bias);
271 }
Jakub Sujake9b3ee22023-04-17 12:08:48 +0100272 add_3d_tensor_nhw_argument(idx, dst);
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000273
274 enqueue(queue, *this, window_collapsed, lws_hint());
275}
276
277} // namespace kernels
278} // namespace opencl
279} // namespace arm_compute