/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "support/ToolchainSupport.h"

#include <cstddef>
#include <cstdint>
#include <tuple>

namespace arm_compute
{
using namespace misc::shape_calculator;

namespace
{
using ElementsProcessed = Steps;

Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
                          const GEMMReshapeInfo &gemm_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
    ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.transpose);
    ARM_COMPUTE_RETURN_ERROR_ON(!rhs_info.transpose);
    ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 != rhs_info.k0);
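    // Note: (x & (x - 1)) == 0 is the usual power-of-two test, so the checks below reject block sizes
    // that are neither a power of two nor 3 (see the error messages for the intended sets of values)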
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
    ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
    ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");

    const int m = gemm_info.m();
    const int n = gemm_info.n();
    const int k = gemm_info.k();

    TensorShape tensor_shape0{ input0->tensor_shape() };
    tensor_shape0.set(0, k);
    tensor_shape0.set(1, m);

    TensorShape tensor_shape1{ input1->tensor_shape() };
    tensor_shape1.set(0, n);
    tensor_shape1.set(1, k);

    const TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0);
    const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);

    const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_lhs_reshaped_shape(tensor_info0, lhs_info));
    const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));

    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);

    if(output->total_size() != 0)
    {
        const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
                                                         const GEMMReshapeInfo &gemm_info, ElementsProcessed &num_elements_processed)
{
    unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
    unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
    bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);

    Window win{};
    Window win_out{};
    bool window_changed = false;

    // Output tensor auto initialization if not yet initialized
    auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)).set_data_type(DataType::S32));

    TensorInfo tmp_info(*output);

    if(reinterpret_output_as_3d)
    {
        // Since the output tensor has to be reinterpreted as 3D and the execution window is based on a 2D GEMM,
        // the window needs to be constructed on the 2D collapsed version of the tensor
        TensorShape tmp_shape(output->tensor_shape());
        tmp_shape.collapse(2U, 1U);
        tmp_info.set_tensor_shape(tmp_shape);
    }

    // Configure kernel window
    num_elems_processed_per_iteration_x = rhs_info.n0;
    num_elems_processed_per_iteration_y = lhs_info.m0;

    // Note: bottom paddings are calculated manually as the output can be reinterpreted as a 3D tensor
    // The only way to set the paddings properly is to set them explicitly through the AccessWindowStatic
    const int m          = gemm_info.m();
    const int bottom_pad = (num_elems_processed_per_iteration_y - (m % num_elems_processed_per_iteration_y)) % num_elems_processed_per_iteration_y;
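    // For example, with m = 33 and m0 = 4 the window spans 36 rows, so bottom_pad = 3 extra rows of padding are required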

    win     = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
    win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));

    AccessWindowStatic input0_access(input0, 0, 0,
                                     ceil_to_multiple(input0->dimension(0), num_elems_processed_per_iteration_y),
                                     input0->dimension(1));
    AccessWindowStatic input1_access(input1, 0, 0,
                                     ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x),
                                     input1->dimension(1));
    AccessWindowStatic output_access(output, 0, 0,
                                     ceil_to_multiple(output->dimension(0), num_elems_processed_per_iteration_x),
                                     output->dimension(1) + bottom_pad);

    window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
                     update_window_and_padding(win_out, output_access);              // window used to update the padding requirements of output tensor

    output_access.set_valid_region(win_out, ValidRegion(Coordinates(0, 0), output->tensor_shape()));

    // Collapse along the Z direction
    // This collapse needs to be here in order to tune the Z dimension of LWS
    Window collapsed = win;
    const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
    collapsed = win.collapse(win, dimension_to_collapse);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, collapsed);
}
} // namespace

CLGEMMLowpMatrixMultiplyReshapedKernel::CLGEMMLowpMatrixMultiplyReshapedKernel()
    : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_output_as_3d(false), _k(1), _use_dummy_work_items(false)
{
}

void CLGEMMLowpMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
                                                       const GEMMReshapeInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info));

    _input0                   = input0;
    _input1                   = input1;
    _output                   = output;
    _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
    _k                        = gemm_info.k();
    _use_dummy_work_items     = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());

    // Check if we need to slide the matrix B
    const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions();
    _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
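    // Note: matrix B is not slid along Z when it has fewer dimensions than matrix A, e.g. when the matrix
    // multiplication implements a convolution and the same reshaped weights are reused for every batch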

    ElementsProcessed num_elements_processed{};

    // Configure kernel window
    auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure_internal(win_config.second);

    // Create build options
    CLBuildOptions build_opts;
    build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
    build_opts.add_option_if(_reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(1)));
    build_opts.add_option_if(_reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(2)));
    build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
    build_opts.add_option_if(lhs_info.interleave, "-DLHS_INTERLEAVE");
    build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
    build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
    build_opts.add_option("-DM=" + support::cpp11::to_string(gemm_info.m()));
    build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n()));
    build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
    build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
    build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
    build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
    build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
    build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_dot8_acc_type_from_data_type(input0->info()->data_type()));

    std::string kernel_name("gemmlowp_mm_reshaped_");
    kernel_name += lhs_info.transpose ? "lhs_t_" : "lhs_nt_";
    kernel_name += rhs_info.transpose ? "rhs_t" : "rhs_nt";
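    // Note: validate_arguments() requires a non-transposed LHS block and a transposed RHS block, so any
    // configuration that passes validation resolves to the "gemmlowp_mm_reshaped_lhs_nt_rhs_t" kernel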

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name;
    _config_id += "_";
    _config_id += dot8_supported(CLKernelLibrary::get().get_device()) ? "_dot8" : "";
    _config_id += "_";
    _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
    _config_id += support::cpp11::to_string(output->info()->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(gemm_info.k());
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(lhs_info.m0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(rhs_info.n0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(lhs_info.k0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(lhs_info.v0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(rhs_info.h0);
    _config_id += "_";
    _config_id += support::cpp11::to_string(lhs_info.interleave);
    _config_id += "_";
    _config_id += support::cpp11::to_string(rhs_info.interleave);
}

Status CLGEMMLowpMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,
                                                        const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info)
{
    ElementsProcessed num_elements_processed{};
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, lhs_info, rhs_info, gemm_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
                                                              input1->clone().get(),
                                                              output->clone().get(),
                                                              lhs_info,
                                                              rhs_info,
                                                              gemm_info,
                                                              num_elements_processed)
                                .first);

    return Status{};
}

void CLGEMMLowpMatrixMultiplyReshapedKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    if(_input1->info()->num_dimensions() < 3)
    {
        // The stride_z for matrix B must be zero if we do not slice
        ARM_COMPUTE_ERROR_ON(_input1->info()->strides_in_bytes()[3] != 0);
    }

    Window slice          = window.first_slice_window_3D();
    Window slice_matrix_b = slice;

    slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
    slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));

    if(_reinterpret_output_as_3d)
    {
        // Pass bottom paddings to the kernel if the output has to be reinterpreted as a 3D tensor
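        // The argument index points just past the per-slice arguments set in the loop below:
        // three 2D tensor arguments plus four cl_uint arguments (k and the three z-strides)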
        const unsigned int idx0                  = 3 * num_arguments_per_2D_tensor() + 4;
        const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
        _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
    }

    do
    {
        Window slice_b = slice;
        // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation
        if(!_slide_matrix_b)
        {
            slice_b = slice_matrix_b;
        }

        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input0, slice);
        add_2D_tensor_argument(idx, _input1, slice_b);
        add_2D_tensor_argument(idx, _output, slice);
        _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_k));
        _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
        _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
        _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
        enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
    }
    while(window.slide_window_slice_3D(slice));
}
} // namespace arm_compute