/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/helpers/AutoConfiguration.h"

#include "arm_compute/core/ITensorPack.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "utils/TypePrinter.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
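// Validate that the block sizes (M0, N0, K0) requested in the MatMulKernelInfo are supported by the native matmul kernels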
Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
{
    const bool adj_lhs = matmul_kernel_info.adj_lhs;
    const bool adj_rhs = matmul_kernel_info.adj_rhs;
    const int  m0      = matmul_kernel_info.m0;
    const int  n0      = matmul_kernel_info.n0;
    const int  k0      = matmul_kernel_info.k0;

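    // Where restricted below, a block size must be one of 1, 2, 3, 4, 8, 16: (x & (x - 1)) is 0 only
    // for powers of two, so the tests reject any other value except the explicitly allowed 3.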
    // Validate M0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0");

    if(adj_lhs)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16), "Only 1,2,3,4,8,16 are supported for M0 when Lhs is transposed");
    }

    // Validate N0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0");

    // Validate K0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0");
    if(!adj_lhs || adj_rhs)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0");
    }

    return Status{};
}

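// Validate that the Lhs/Rhs shapes are compatible for matrix multiplication and that their batch dimensions match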
Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
{
    const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
    const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");

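    // Dimensions 0 and 1 hold the matrix extents; every dimension from index 2 upwards is a batch
    // dimension and must match exactly between Lhs and Rhs (broadcasting is not supported).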
    constexpr size_t batch_dim_start = 2;
    for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
    }

    return Status{};
}
} // namespace
ClNativeMatMulKernel::ClNativeMatMulKernel()
{
    _type = CLKernelType::GEMM;
}
Status ClNativeMatMulKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));

    if(output->total_size() != 0)
    {
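        // If the caller has already initialized the output, its shape must match the shape inferred
        // from Lhs/Rhs and its data type must match the inputs.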
        const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, output);
    }

    return Status{};
}
void ClNativeMatMulKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output, &compile_context, &matmul_kernel_info);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output, matmul_kernel_info);

    // output tensor auto initialization if not yet initialized
    auto_init_if_empty(*output, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));
    ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, output, matmul_kernel_info));

    const int  m       = output->dimension(1);
    const int  n       = output->dimension(0);
    const int  k       = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();
    const bool adj_lhs = matmul_kernel_info.adj_lhs;

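    // Cap the configured block sizes so a single processing tile never exceeds the output extents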
    int m0 = adj_lhs ? adjust_vec_size(matmul_kernel_info.m0, m) : std::min(matmul_kernel_info.m0, m);
    int n0 = adjust_vec_size(matmul_kernel_info.n0, n);

    // Configure kernel window
    Window win = calculate_max_window(*output, Steps(n0, m0));
    win        = win.collapse(win, Window::DimZ);
    IClKernel::configure_internal(win);

    // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
    const unsigned int partial_store_m0 = m % m0; // M is output->dimension(1)
    const unsigned int partial_store_n0 = n % n0; // N is output->dimension(0)
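    // Example: with M = 7 and M0 = 4, the last tile along M stores only 3 rows (PARTIAL_STORE_M0 = 7 % 4 = 3)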

    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type()));
    build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
    build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
    build_opts.add_option("-DK0=" + support::cpp11::to_string(matmul_kernel_info.k0));
    build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
    build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
    build_opts.add_option("-DK=" + support::cpp11::to_string(k));

    std::string kernel_name("mat_mul_native");
    kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
    kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";
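    // e.g. mat_mul_native_nt_t for a non-transposed Lhs and a transposed Rhs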

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name;
    _config_id += "_";
    _config_id += lower_string(string_from_data_type(lhs->data_type()));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(m0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(n0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(matmul_kernel_info.k0);
}

void ClNativeMatMulKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const ICLTensor *lhs    = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const ICLTensor *rhs    = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    ICLTensor       *output = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output);

    unsigned int idx              = 0;
    Window       window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ);

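    // The OpenCL kernel consumes Lhs, Rhs and Dst as plain 3D tensor arguments, added in this order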
    add_3d_tensor_nhw_argument(idx, lhs);
    add_3d_tensor_nhw_argument(idx, rhs);
    add_3d_tensor_nhw_argument(idx, output);

    enqueue(queue, *this, window_collapsed, lws_hint());
}

} // namespace kernels
} // namespace opencl
} // namespace arm_compute