/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/ActivationFunctionUtils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/StringUtils.h"

#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
using namespace misc::shape_calculator;

namespace
{
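// Steps is reused here to track the number of elements processed per iteration along X and Y.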
using ElementsProcessed = Steps;

Status validate_arguments(const ITensorInfo    *src0,
                          const ITensorInfo    *src1,
                          const ITensorInfo    *dst,
                          const GEMMKernelInfo &gemm_info,
                          const ITensorInfo    *vector_sum_col,
                          const ITensorInfo    *vector_sum_row,
                          const ITensorInfo    *bias,
                          const ITensorInfo    *output_multipliers,
                          const ITensorInfo    *output_shifts)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()),
                                    "The extension cl_arm_matrix_multiply is not supported on the target platform");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4,
                                    "The number of dimensions for the LHS matrix must be <= 4");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3,
                                    "The number of dimensions for the RHS matrix must be <= 3");

    const GEMMRHSMatrixInfo       rhs_info     = gemm_info.rhs_info;
    const GEMMLHSMatrixInfo       lhs_info     = gemm_info.lhs_info;
    const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage;

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_info.k0 != 4 || lhs_info.k0 != 4, "Only 4 is supported as value for k0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(lhs_info.m0 == 1 || lhs_info.m0 == 2 || lhs_info.m0 == 4),
                                    "Only 1,2,4 are supported for m0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(rhs_info.n0 == 1 || rhs_info.n0 == 4 || rhs_info.n0 == 8),
                                    "Only 1,4,8 are supported for n0");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_info.export_to_cl_image, "Export to CLImage not supported for quantized GEMM");

    const int m = gemm_info.m;
    const int n = gemm_info.n;
    const int k = gemm_info.k;

    TensorShape tensor_shape1{src1->tensor_shape()};
    tensor_shape1.set(0, n);
    tensor_shape1.set(1, k);

    const TensorInfo tensor_info1          = src1->clone()->set_tensor_shape(tensor_shape1);
    const TensorInfo tensor_info_reshaped1 =
        src1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));

    ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(0) != static_cast<unsigned int>(k));
    if (gemm_info.reinterpret_input_as_3d)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(1) * src0->dimension(2) != static_cast<unsigned int>(m));
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(1) != static_cast<unsigned int>(m));
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1);

    const TensorShape expected_dst_shape = compute_mm_shape(*src0, *src1, gemm_info);
    if (dst->total_size() != 0)
    {
        const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_dst_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
        if (output_stage.type == GEMMLowpOutputStageType::NONE)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst);
        }
    }

    if (bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(expected_dst_shape[0] != bias->dimension(0));
    }

    ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN) ||
                                        (output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT),
                                    "Only GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT is supported");

    // Checks performed if the output stage needs to be fused
    if (output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
    {
        // If a_offset == 0, vector_sum_col can be a nullptr
        if (gemm_info.a_offset != 0)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
            ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != expected_dst_shape[0]);
        }

        // If b_offset == 0, vector_sum_row can be a nullptr
        if (gemm_info.b_offset != 0)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

            // Check if mm result is a 3D reinterpretation
            const bool reinterpret_as_3d =
                expected_dst_shape.num_dimensions() > 1 && expected_dst_shape.y() != vector_sum_row->tensor_shape().x();

            // Validate input
            ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) !=
                                                                 (expected_dst_shape[1] * expected_dst_shape[2]));
            ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != expected_dst_shape[1]);

            if (expected_dst_shape.num_dimensions() > 1)
            {
                const unsigned int dst_batch_idx = reinterpret_as_3d ? 3 : 2;

                TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
                vector_sum_row_shape.collapse_from(1);
                TensorShape collapsed_dst_shape(expected_dst_shape);
                collapsed_dst_shape.collapse_from(dst_batch_idx);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != collapsed_dst_shape[dst_batch_idx],
                                                "vector_sum_row must have the same number of batches of dst tensor");

                if (gemm_info.a_offset != 0)
                {
                    TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                    vector_sum_col_shape.collapse_from(1);

                    ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 &&
                                                        vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                    "vector_sum_col tensor must have the same number of batches of "
                                                    "vector_sum_row_shape or the number of batches must be set to 1");
                }
            }
        }

        if (dst->total_size() != 0)
        {
            ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != dst->data_type());
        }
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);

        if (output_multipliers != nullptr && output_shifts != nullptr)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
            ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
            ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
            if (output_stage.is_quantized_per_channel)
            {
                ARM_COMPUTE_RETURN_ERROR_ON(expected_dst_shape[0] != output_shifts->dimension(0));
                ARM_COMPUTE_RETURN_ERROR_ON(expected_dst_shape[0] != output_multipliers->dimension(0));
            }
        }
    }
    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(const ITensorInfo    *src0,
                                                        const ITensorInfo    *src1,
                                                        ITensorInfo          *dst,
                                                        const GEMMKernelInfo &gemm_info,
                                                        ITensorInfo          *vector_sum_col,
                                                        const ITensorInfo    *vector_sum_row,
                                                        ITensorInfo          *bias,
                                                        ITensorInfo          *output_multipliers,
                                                        ITensorInfo          *output_shifts,
                                                        ElementsProcessed    &num_elements_processed)
{
    const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage;

    unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
    unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
    bool          reinterpret_output_as_3d            = (gemm_info.depth_output_gemm3d != 0);

    Window win{};
    bool   window_changed = false;

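    // Fixed MMUL block sizes used below when sizing the execution window for the cooperative arm_matrix_multiply operation.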
    constexpr unsigned int mmul_n0 = 4;
    constexpr unsigned int mmul_m0 = 4;
    constexpr unsigned int mmul_k0 = 16;

    reinterpret_output_as_3d = false;
    // dst tensor auto initialization if not yet initialized
    const TensorShape expected_dst_shape = compute_mm_shape(*src0, *src1, gemm_info);
    if (output_stage.type != GEMMLowpOutputStageType::NONE)
    {
        auto_init_if_empty(
            *dst, src0->clone()->set_tensor_shape(expected_dst_shape).set_data_type(output_stage.output_data_type));
    }
    else
    {
        auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(expected_dst_shape).set_data_type(DataType::S32));
    }

    TensorInfo tmp_info(*dst);

    if (reinterpret_output_as_3d)
    {
        // Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
        // the window needs to be constructed on the 2D collapsed version of the tensor
        TensorShape tmp_shape(dst->tensor_shape());
        tmp_shape.collapse(2U, 1U);
        tmp_info.set_tensor_shape(tmp_shape);
    }

    // Configure kernel window
    num_elems_processed_per_iteration_x = 1;
    num_elems_processed_per_iteration_y = 1;

    win =
        calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));

    if (output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
    {
        if (gemm_info.a_offset != 0)
        {
            AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration_x);
            window_changed = window_changed || update_window_and_padding(win, vector_sum_col_access);
        }
        // No access window needed for vector_sum_row
        ARM_COMPUTE_UNUSED(vector_sum_row);

        if (bias != nullptr)
        {
            AccessWindowHorizontal bias_access(bias, 0, num_elems_processed_per_iteration_x);
            window_changed = window_changed || update_window_and_padding(win, bias_access);
        }

        if (output_multipliers != nullptr && output_stage.is_quantized_per_channel)
        {
            AccessWindowHorizontal output_multipliers_access(output_multipliers, 0,
                                                             num_elems_processed_per_iteration_x);
            AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_processed_per_iteration_x);
            window_changed =
                window_changed || update_window_and_padding(win, output_multipliers_access, output_shifts_access);
        }
    }

    // Collapse along the Z direction
    // This collapse needs to be here in order to tune the Z dimension of LWS
    const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
    Window             collapsed             = win.collapse(win, dimension_to_collapse);

    // Reconfigure the window size: one arm_matrix_multiply operation needs 16 work items to complete.
    Window::Dimension x_dimension = collapsed.x();
    Window::Dimension y_dimension = collapsed.y();

    // Make M and N multiple of M0 and N0 respectively
    const unsigned int ceil_to_multiple_n_n0 = ceil_to_multiple(x_dimension.end(), gemm_info.rhs_info.n0);
    const unsigned int ceil_to_multiple_m_m0 = ceil_to_multiple(y_dimension.end(), gemm_info.lhs_info.m0);

    // Divide M and N by M0 and N0 respectively
    const unsigned int n_div_n0 = ceil_to_multiple_n_n0 / gemm_info.rhs_info.n0;
    const unsigned int m_div_m0 = ceil_to_multiple_m_m0 / gemm_info.lhs_info.m0;

    // Make n_div_n0 and m_div_m0 multiple of mmul_n0 and mmul_k0 respectively
    const unsigned int ceil_to_multiple_n_div_n0_mmul_n0 = ceil_to_multiple(n_div_n0, mmul_n0);
    const unsigned int ceil_to_multiple_m_div_m0_mmul_m0 = ceil_to_multiple(m_div_m0, mmul_k0);

    // Ensure x_dimension is multiple of MMUL block size (mmul_n0 * mmul_m0)
    x_dimension.set_end(ceil_to_multiple_n_div_n0_mmul_n0 * mmul_n0);
    y_dimension.set_end(ceil_to_multiple_m_div_m0_mmul_m0 / mmul_m0);

    collapsed.set(Window::DimX, x_dimension);
    collapsed.set(Window::DimY, y_dimension);

    Status err =
        (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, collapsed);
}
} // namespace

ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel::ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel()
{
    _type = CLKernelType::GEMM;
}

void ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel::configure(const CLCompileContext &compile_context,
                                                                  const ITensorInfo      *src0,
                                                                  const ITensorInfo      *src1,
                                                                  ITensorInfo            *dst,
                                                                  const GEMMKernelInfo   &gemm_info,
                                                                  ITensorInfo            *vector_sum_col,
                                                                  const ITensorInfo      *vector_sum_row,
                                                                  ITensorInfo            *bias,
                                                                  ITensorInfo            *output_multipliers,
                                                                  ITensorInfo            *output_shifts)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, dst, gemm_info, vector_sum_col, vector_sum_row, bias,
                                                  output_multipliers, output_shifts));

    auto                          padding_info = get_padding_info({src0, src1, dst, vector_sum_row});
    const GEMMRHSMatrixInfo       rhs_info     = gemm_info.rhs_info;
    const GEMMLHSMatrixInfo       lhs_info     = gemm_info.lhs_info;
    const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage;
    const int32_t                 a_offset     = gemm_info.a_offset;
    const int32_t                 b_offset     = gemm_info.b_offset;
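    // MMUL block sizes; they are passed to the OpenCL kernel below as the MMUL_M0 / MMUL_N0 / MMUL_K0 build options.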
    constexpr int                 mmul_m0      = 4;
    constexpr int                 mmul_n0      = 4;
    constexpr int                 mmul_k0      = 16;

    _m = gemm_info.m;
    _n = gemm_info.n;
    _k = gemm_info.k;

    ElementsProcessed num_elements_processed{};

    // Configure kernel window
    auto win_config = validate_and_configure_window(src0, src1, dst, gemm_info, vector_sum_col, vector_sum_row, bias,
                                                    output_multipliers, output_shifts, num_elements_processed);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure_internal(win_config.second);

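    // Leftover rows/columns when M and N are not multiples of M0 and N0; passed to the OpenCL kernel as M0_LEFTOVER / N0_LEFTOVER.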
    const unsigned int m0_leftover = _m % lhs_info.m0;
    const unsigned int n0_leftover = _n % rhs_info.n0;

    // Create build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src0->data_type()));
    build_opts.add_option("-DVEC_TYPE=" + get_cl_type_from_data_type(src0->data_type()) + "4");
    build_opts.add_option("-DACC_DATA_TYPE=int");
    build_opts.add_option("-DOUT_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
    build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
    build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
    build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
    build_opts.add_option("-DM0_LEFTOVER=" + support::cpp11::to_string(m0_leftover));
    build_opts.add_option("-DN0_LEFTOVER=" + support::cpp11::to_string(n0_leftover));
    build_opts.add_option("-DMMUL_M0=" + support::cpp11::to_string(mmul_m0));
    build_opts.add_option("-DMMUL_N0=" + support::cpp11::to_string(mmul_n0));
    build_opts.add_option("-DMMUL_K0=" + support::cpp11::to_string(mmul_k0));
    build_opts.add_option("-DACTIVATION_TYPE=" +
                          lower_string(string_from_activation_func(gemm_info.activation_info.activation())));
    build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.a()));
    build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.b()));

    std::string kernel_name("gemmlowp_mm_reshaped_only_rhs_mmul");

    if (output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
    {
        build_opts.add_option("-DFUSED_OUTPUT_STAGE_FIXED_POINT");
        _fuse_output_stage = true;
        // If a_offset == 0, vector_sum_col can be a nullptr
        if (a_offset != 0 && vector_sum_col != nullptr)
        {
            build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
            build_opts.add_option_if(vector_sum_col->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
        }
        // If b_offset == 0, vector_sum_row can be a nullptr
        build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
        build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * src0->dimension(0)));
        build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
        build_opts.add_option_if(gemm_info.broadcast_bias == true, "-DBROADCAST_BIAS");
        build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
        build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
        build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));

        const int min = output_stage.gemmlowp_min_bound;
        const int max = output_stage.gemmlowp_max_bound;

        PixelValue min_val{};
        PixelValue max_val{};
        std::tie(min_val, max_val) = get_min_max(dst->data_type());
        build_opts.add_option_if(min != min_val.get<int32_t>(), "-DMIN_BOUND=" + support::cpp11::to_string(min));
        build_opts.add_option_if(max != max_val.get<int32_t>(), "-DMAX_BOUND=" + support::cpp11::to_string(max));
    }

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name;
    _config_id += "_";
    _config_id += (bias != nullptr ? "add_bias_" : "");
    _config_id += (gemm_info.broadcast_bias ? "broadcast_bias_" : "");
    _config_id += (gemm_info.activation_info.enabled() ? "fused_activation_" : "");
    _config_id += lower_string(string_from_data_type(src0->data_type()));
    _config_id += "_";
    _config_id += support::cpp11::to_string(_m);
    _config_id += "_";
    _config_id += support::cpp11::to_string(_n);
    _config_id += "_";
    _config_id += support::cpp11::to_string(_k);
    _config_id += "_";
    _config_id += support::cpp11::to_string(lhs_info.m0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(rhs_info.n0);

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel::validate(const ITensorInfo    *src0,
                                                                   const ITensorInfo    *src1,
                                                                   const ITensorInfo    *dst,
                                                                   const GEMMKernelInfo &gemm_info,
                                                                   const ITensorInfo    *vector_sum_col,
                                                                   const ITensorInfo    *vector_sum_row,
                                                                   const ITensorInfo    *bias,
                                                                   const ITensorInfo    *output_multipliers,
                                                                   const ITensorInfo    *output_shifts)
{
    ElementsProcessed num_elements_processed{};
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src0, src1, dst, gemm_info, vector_sum_col, vector_sum_row, bias,
                                                   output_multipliers, output_shifts));
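    // Validate the window configuration on clones so that the auto-initialization performed inside
    // validate_and_configure_window does not modify the caller's tensor infos.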
    ARM_COMPUTE_RETURN_ON_ERROR(
        validate_and_configure_window(src0->clone().get(), src1->clone().get(), dst->clone().get(), gemm_info,
                                      vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
                                      vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
                                      bias != nullptr ? bias->clone().get() : nullptr,
                                      output_multipliers != nullptr ? output_multipliers->clone().get() : nullptr,
                                      output_shifts != nullptr ? output_shifts->clone().get() : nullptr,
                                      num_elements_processed)
                                    .first);

    return Status{};
}

void ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel::run_op(ITensorPack      &tensors,
                                                               const Window     &window,
                                                               cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const auto src0 =
        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const auto src1 =
        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    const auto src2 =
        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
    const auto vector_sum_col =
        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_COL_SUM));
    const auto vector_sum_row =
        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_ROW_SUM));
    auto       dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);

    if (src1->info()->num_dimensions() < 3)
    {
        // The stride_z for matrix B must be zero if we do not slice
        ARM_COMPUTE_ERROR_ON(src1->info()->strides_in_bytes()[3] != 0);
    }

    cl::Image2D src1_image2d;

    Window slice = window.first_slice_window_3D();

    do
    {
        unsigned int idx = 0;

        add_3d_tensor_nhw_argument(idx, src0);
        add_3d_tensor_nhw_argument(idx, src1);

        // Bias buffer (_add_bias == true)
        if (src2 != nullptr)
        {
            add_3d_tensor_nhw_argument(idx, src2);
        }
        // dst buffer
        add_3d_tensor_nhw_argument(idx, dst);

        // Pass m, n and k at runtime as signed ints so that the result of any subtraction they take part in remains signed.
        _kernel.setArg<cl_int>(idx++, _m);
        _kernel.setArg<cl_int>(idx++, _n);
        _kernel.setArg<cl_int>(idx++, _k);

        if (_fuse_output_stage)
        {
            if (vector_sum_col != nullptr)
            {
                add_3d_tensor_nhw_argument(idx, vector_sum_col);
            }
            if (vector_sum_row != nullptr)
            {
                add_3d_tensor_nhw_argument(idx, vector_sum_row);
            }
        }

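        // Enqueue with a fixed local work-group size (32x2); the window reconfiguration in
        // validate_and_configure_window ensures each arm_matrix_multiply operation has the 16 work items it needs.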
        enqueue(queue, *this, slice, cl::NDRange(32, 2), false);
    } while (window.slide_window_slice_3D(slice));
}
} // namespace kernels
} // namespace opencl
} // namespace arm_compute