/*
2 * Copyright (c) 2023 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h"
25#include "arm_compute/core/CL/ICLTensor.h"
26#include "arm_compute/core/TensorInfo.h"
27#include "arm_compute/core/utils/misc/ShapeCalculator.h"
28#include "src/core/helpers/AutoConfiguration.h"
29
30#include "arm_compute/core/ITensorPack.h"
31#include "src/common/utils/Log.h"
32#include "src/core/helpers/WindowHelpers.h"
33#include "support/Cast.h"
34#include "utils/TypePrinter.h"
35
36namespace arm_compute
37{
38namespace opencl
39{
40namespace kernels
41{
42namespace
43{
44Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
45{
46 const bool adj_lhs = matmul_kernel_info.adj_lhs;
47 const bool adj_rhs = matmul_kernel_info.adj_rhs;
48 const int m0 = matmul_kernel_info.m0;
49 const int n0 = matmul_kernel_info.n0;
50 const int k0 = matmul_kernel_info.k0;
51
52 // Validate M0
53 if(!adj_lhs)
54 {
55 // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not efficient
56 ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0 for Lhs non-transposed");
57 }
58 else
59 {
60 ARM_COMPUTE_RETURN_ERROR_ON_MSG((m0 & (m0 - 1)) && (m0 != 3) && (m0 > 16), "Only 1,2,3,4,8,16 are supported for N0 for Lhs transposed");
61 }
62
63 // Validate N0
64 ARM_COMPUTE_RETURN_ERROR_ON_MSG((n0 & (n0 - 1)) && (n0 != 3) && (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0");
65
66 // Validate K0
67 if(adj_lhs && !adj_rhs)
68 {
69 // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not efficient
70 ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0 for Lhs transposed & Rhs non-transposed");
71 }
72 else
73 {
74 ARM_COMPUTE_RETURN_ERROR_ON_MSG((k0 & (k0 - 1)) && (k0 != 3) && (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0");
75 }
76
77 return Status{};
78}
79}
// Default constructor: tags this kernel as a GEMM-type CL kernel.
ClNativeMatMulKernel::ClNativeMatMulKernel()
{
    _type = CLKernelType::GEMM;
}
// Static argument validation for the native matmul kernel.
//
// Checks: non-null tensor infos, F16/F32 input type, matching lhs/rhs data types,
// a legal M0/N0/K0 block configuration, and — when the output is already
// initialized — that its shape and type match the expected batch-matmul result.
//
// @param[in] lhs                Input tensor info for the LHS matrix.
// @param[in] rhs                Input tensor info for the RHS matrix.
// @param[in] output             Output tensor info (may be uninitialized, i.e. total_size() == 0).
// @param[in] matmul_kernel_info Transpose flags and M0/N0/K0 block sizes.
//
// @return a Status: OK on success, an error Status otherwise.
Status ClNativeMatMulKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));

    // If the caller pre-initialized the output, it must agree with the shape/type
    // the batched matmul would produce for these inputs.
    if(output->total_size() != 0)
    {
        const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_batchmatmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, output);
    }
    // Only the LHS non-transposed variants exist so far: taken together these two
    // checks reject any configuration with adj_lhs == true.
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(matmul_kernel_info.adj_lhs && matmul_kernel_info.adj_rhs, "LHS T and RHS T not implemented");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(matmul_kernel_info.adj_lhs && !matmul_kernel_info.adj_rhs, "LHS T and RHS NT not implemented");

    return Status{};
}
// Configures the OpenCL native matmul kernel.
//
// Auto-initializes the output tensor info if empty, validates all arguments,
// derives the effective M0/N0 block sizes, sets up the execution window,
// builds the OpenCL program with -D macros, and composes the config_id used
// for LWS tuning.
//
// @param[in]     compile_context    CL compile context used to build the kernel.
// @param[in]     lhs                LHS input tensor info.
// @param[in]     rhs                RHS input tensor info.
// @param[in,out] output             Output tensor info; initialized here if empty.
// @param[in]     matmul_kernel_info Transpose flags and M0/N0/K0 block sizes.
void ClNativeMatMulKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output, &compile_context, &matmul_kernel_info);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output, matmul_kernel_info);

    // output tensor auto initialization if not yet initialized
    // (must happen before validate(), which checks the output against the expected shape)
    auto_init_if_empty(*output, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_batchmatmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));
    ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, output, matmul_kernel_info));

    // M/N come from the output; K is the LHS reduction dimension, whose axis
    // depends on whether the LHS is transposed.
    const int m = output->dimension(1);
    const int n = output->dimension(0);
    const int k = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();

    // Clamp the requested block sizes so they never exceed the actual matrix dimensions.
    int m0 = std::min(matmul_kernel_info.m0, m);
    int n0 = adjust_vec_size(matmul_kernel_info.n0, n);

    // Configure kernel window
    Window win = calculate_max_window(*output, Steps(n0, m0));
    win = win.collapse(win, Window::DimZ);
    IClKernel::configure_internal(win);

    // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
    const unsigned int partial_store_m0 = m % m0; // M is output->dimension(1)
    const unsigned int partial_store_n0 = n % n0; // N is output->dimension(0)

    // Pass all compile-time parameters to the OpenCL kernel as -D macros.
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type()));
    build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
    build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
    build_opts.add_option("-DK0=" + support::cpp11::to_string(matmul_kernel_info.k0));
    build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
    build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
    build_opts.add_option("-DK=" + support::cpp11::to_string(k));

    // Kernel name encodes the transpose configuration, e.g. "mat_mul_native_nt_t".
    std::string kernel_name("mat_mul_native");
    kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
    kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";

    // Transposed-LHS kernels do not exist yet (validate() above also rejects them).
    if(matmul_kernel_info.adj_lhs)
    {
        ARM_COMPUTE_ERROR("Only Implemented LHS non-transposed kernels");
    }

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Set config_id for enabling LWS tuning
    // Format: name_dtype_M_N_batch_m0_n0_k0
    _config_id = kernel_name;
    _config_id += "_";
    _config_id += lower_string(string_from_data_type(lhs->data_type()));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(m0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(n0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(matmul_kernel_info.k0);
}
168
// Enqueues the configured matmul kernel on the given command queue.
//
// Unpacks lhs/rhs/output from the tensor pack, binds them as 3D NHW tensor
// arguments, and enqueues the kernel over the (Z-collapsed) window.
//
// @param[in] tensors Tensor pack holding ACL_SRC_0 (lhs), ACL_SRC_1 (rhs) and ACL_DST (output).
// @param[in] window  Execution window; must be a valid sub-window of the configured one.
// @param[in] queue   OpenCL command queue to enqueue the kernel on.
void ClNativeMatMulKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    ICLTensor *output = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output);

    unsigned int idx = 0;
    // Collapse Z so the batch dimensions are iterated as one; matches the
    // collapse performed on the window in configure().
    Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ);

    // Argument order must match the OpenCL kernel signature: lhs, rhs, output.
    add_3d_tensor_nhw_argument(idx, lhs);
    add_3d_tensor_nhw_argument(idx, rhs);
    add_3d_tensor_nhw_argument(idx, output);

    enqueue(queue, *this, window_collapsed, lws_hint());
}
189
190} // namespace kernels
191} // namespace opencl
192} // namespace arm_compute