/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClMatMulNativeMMULKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
// Block size dimensions for the MMUL extension
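// Note: as the comments in validate_and_configure_window() and run_op() below indicate,
// one arm_matrix_multiply call is completed cooperatively by mmul_m0 * mmul_n0 = 16 work-items.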
constexpr int mmul_m0 = 4;
constexpr int mmul_n0 = 4;
constexpr int mmul_k0 = 4;

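// Clamp M0 to the number of output rows and shrink N0 to a supported vector size that does not
// exceed the number of output columns. Illustrative example (assuming adjust_vec_size() rounds
// down to the nearest supported vector size): adjust_m0_n0(4, 4, /*m=*/2, /*n=*/2) returns {2, 2}.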
inline std::pair<int, int> adjust_m0_n0(int m0, int n0, int m, int n)
{
    m0 = std::min(m0, m);
    n0 = adjust_vec_size(n0, n);
    return { m0, n0 };
}

Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
{
    const bool adj_lhs = matmul_kernel_info.adj_lhs;
    const int  m0      = matmul_kernel_info.m0;
    const int  n0      = matmul_kernel_info.n0;
    const int  k0      = matmul_kernel_info.k0;

    ARM_COMPUTE_RETURN_ERROR_ON_MSG((adj_lhs), "adj_lhs is not supported yet");

    // Validate M0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0");

    // Validate N0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0");

    // Validate K0
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((k0 != 1), "Only 1 is supported for K0");

    return Status{};
}

Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
{
    const size_t lhs_k = lhs_shape.x();
    const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG_VAR((lhs_k % mmul_k0) != 0, "K dimension must be a multiple of %d", mmul_k0);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");

    constexpr size_t batch_dim_start = 2;
    for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_UNUSED(lhs, rhs);

    const Window win = calculate_max_window(*dst, Steps(1, 1));

    // Collapse along the Z direction
    // This collapse needs to be here in order to tune the Z dimension of LWS
    Window collapsed = win.collapse(win, Window::DimZ);

    // Reconfigure the window size: one arm_matrix_multiply call needs 16 work-items to complete.
    Window::Dimension x_dimension = collapsed.x();
    Window::Dimension y_dimension = collapsed.y();

    const int m = dst->dimension(1);
    const int n = dst->dimension(0);

    int m0{};
    int n0{};
    std::tie(m0, n0) = adjust_m0_n0(matmul_kernel_info.m0, matmul_kernel_info.n0, m, n);

    // Make M and N multiples of M0 and N0 respectively
    const unsigned int ceil_to_multiple_n_n0 = ceil_to_multiple(n, n0);
    const unsigned int ceil_to_multiple_m_m0 = ceil_to_multiple(m, m0);

    // Divide M and N by M0 and N0 respectively
    const unsigned int n_div_n0 = ceil_to_multiple_n_n0 / n0;
    const unsigned int m_div_m0 = ceil_to_multiple_m_m0 / m0;

    // Make n_div_n0 and m_div_m0 multiples of mmul_n0 and mmul_m0 respectively
    const unsigned int ceil_to_multiple_n_div_n0_mmul_n0 = ceil_to_multiple(n_div_n0, mmul_n0);
    const unsigned int ceil_to_multiple_m_div_m0_mmul_m0 = ceil_to_multiple(m_div_m0, mmul_m0);

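    // Worked example (illustrative values only): with m = n = 72 and m0 = n0 = 4,
    // n_div_n0 = m_div_m0 = 18, which rounds up to 20. The window then ends at
    // 20 * mmul_m0 = 80 in X and 20 / mmul_m0 = 5 in Y, i.e. 400 work-items in total,
    // so every group of mmul_m0 * mmul_n0 = 16 work-items cooperates on one
    // arm_matrix_multiply block.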
    // Ensure x_dimension is a multiple of the MMUL block size (mmul_m0 * mmul_n0)
    x_dimension.set_end(ceil_to_multiple_n_div_n0_mmul_n0 * mmul_m0);
    y_dimension.set_end(ceil_to_multiple_m_div_m0_mmul_m0 / mmul_m0);

    collapsed.set(Window::DimX, x_dimension);
    collapsed.set(Window::DimY, y_dimension);

    return std::make_pair(Status{}, collapsed);
}
} // namespace
ClMatMulNativeMMULKernel::ClMatMulNativeMMULKernel()
{
    _type = CLKernelType::GEMM;
}

Status ClMatMulNativeMMULKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()), "The extension cl_arm_matrix_multiply is not supported on the target platform");
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));

    if(dst->total_size() != 0)
    {
        const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst);
    }

    return Status{};
}

void ClMatMulNativeMMULKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst, matmul_kernel_info);
    ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, dst, matmul_kernel_info));

    // dst tensor auto initialization if not yet initialized
    auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));

    const int m = dst->dimension(1);
    const int n = dst->dimension(0);
    const int k = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();

    _m = m;
    _n = n;
    _k = k;

    int m0{};
    int n0{};
    std::tie(m0, n0) = adjust_m0_n0(matmul_kernel_info.m0, matmul_kernel_info.n0, m, n);

    // Configure kernel window
    const auto win_config = validate_and_configure_window(lhs, rhs, dst, matmul_kernel_info);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    IClKernel::configure_internal(win_config.second);

    // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column, if any. This avoids the need for padding.
    const unsigned int m0_leftover = m % m0;
    const unsigned int n0_leftover = n % n0;
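    // Illustrative example (values chosen for exposition only): m = 70 with m0 = 4 gives
    // M0_LEFTOVER = 2, so the kernel writes only the two remaining rows of the last block
    // instead of requiring the tensor to be padded to a multiple of M0.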

    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type()));
    build_opts.add_option_if(lhs->data_type() == DataType::F16, "-DHALF_PRECISION");
    build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
    build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
    build_opts.add_option("-DM0_LEFTOVER=" + support::cpp11::to_string(m0_leftover));
    build_opts.add_option("-DN0_LEFTOVER=" + support::cpp11::to_string(n0_leftover));
    build_opts.add_option("-DMMUL_M0=" + support::cpp11::to_string(mmul_m0));
    build_opts.add_option("-DMMUL_N0=" + support::cpp11::to_string(mmul_n0));
    build_opts.add_option("-DMMUL_K0=" + support::cpp11::to_string(mmul_k0));

    std::string kernel_name("mat_mul_native_mmul");
    kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
    kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";
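    // For example, adj_lhs = false and adj_rhs = true select the "mat_mul_native_mmul_nt_t"
    // OpenCL kernel variant (non-transposed LHS, transposed RHS).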

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name;
    _config_id += "_";
    _config_id += lower_string(string_from_data_type(lhs->data_type()));
    _config_id += "_";
    _config_id += support::cpp11::to_string(k);
    _config_id += "_";
    _config_id += support::cpp11::to_string(dst->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(m0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(n0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(matmul_kernel_info.k0);
}

void ClMatMulNativeMMULKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    ICLTensor       *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst);
    unsigned int idx = 0;

    add_3d_tensor_nhw_argument(idx, lhs);
    add_3d_tensor_nhw_argument(idx, rhs);
    add_3d_tensor_nhw_argument(idx, dst);

    // Pass m, n and k at runtime as signed ints to ensure that any subtractions they may appear in produce signed results.
    _kernel.setArg<cl_int>(idx++, _m);
    _kernel.setArg<cl_int>(idx++, _n);
    _kernel.setArg<cl_int>(idx++, _k);

    // LWS_x should be a multiple of 16 at least. (32, 2) has been chosen to have more work-items on a single core
    // LWS also enforces the order of execution of the work-items, which improves cache utilization
    enqueue(queue, *this, window, cl::NDRange(32, 2), false);
}

} // namespace kernels
} // namespace opencl
} // namespace arm_compute