blob: d39dcdb336c9858d72447deac6739039dd28415d [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
2 * Copyright (c) 2017 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
25
26#include "arm_compute/core/AccessWindowStatic.h"
27#include "arm_compute/core/AccessWindowTranspose.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010028#include "arm_compute/core/CL/CLHelpers.h"
29#include "arm_compute/core/CL/CLKernelLibrary.h"
30#include "arm_compute/core/CL/ICLTensor.h"
31#include "arm_compute/core/CL/OpenCL.h"
32#include "arm_compute/core/Error.h"
Gian Marco Iodice3a3066b2017-06-23 13:38:14 +010033#include "arm_compute/core/FixedPoint.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010034#include "arm_compute/core/Helpers.h"
35#include "arm_compute/core/Types.h"
36#include "arm_compute/core/Utils.h"
37#include "arm_compute/core/Validate.h"
38#include "arm_compute/core/Window.h"
39
40#include <set>
Anthony Barbier6ff3b192017-09-04 18:44:23 +010041#include <string>
42
43using namespace arm_compute;
44
45CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel()
46 : _input0(nullptr), _input1(nullptr), _output(nullptr)
47{
48}
49
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +010050void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, bool is_interleaved_transposed)
Anthony Barbier6ff3b192017-09-04 18:44:23 +010051{
Gian Marco Iodice8a383692017-07-03 17:41:47 +010052 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010053 ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
Gian Marco Iodice3a3066b2017-06-23 13:38:14 +010054 ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +010055 if(!is_interleaved_transposed)
Anthony Barbier6ff3b192017-09-04 18:44:23 +010056 {
57 ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1));
58 }
59
60 _input0 = input0;
61 _input1 = input1;
62 _output = output;
63
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +000064 const DataType data_type = input0->info()->data_type();
65 const int fp_pos = input0->info()->fixed_point_position();
66
67 // Get target architecture
68 GPUTarget arch_target = get_arch_from_target(get_target());
69
70 // Configure LWS hint
71 _lws_hint = (output->info()->dimension(1) == 196) ? cl::NDRange(1, 7) : cl::NDRange(8, 8);
72
73 // Create build options
74 CLBuildOptions build_opts;
75 build_opts.add_option_if(is_data_type_fixed_point(data_type), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(fp_pos));
76
77 const bool multiply_alpha = std::abs(1.0f - alpha) > 0.00001f;
78
79 // Only define ALPHA when alpha is not 1.0f. This avoids performing unnecessary multiplications.
80 if(multiply_alpha)
Anthony Barbier6ff3b192017-09-04 18:44:23 +010081 {
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +000082 build_opts.add_option_if_else(is_data_type_fixed_point(data_type),
83 "-DALPHA=" + support::cpp11::to_string((data_type == DataType::QS8 ? sqcvt_qs8_f32(alpha, fp_pos) : sqcvt_qs16_f32(alpha, fp_pos))),
84 "-DALPHA=" + float_to_string_with_full_precision(alpha));
Anthony Barbier6ff3b192017-09-04 18:44:23 +010085 }
86
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +000087 std::string kernel_name;
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +010088 if(is_interleaved_transposed)
Anthony Barbier6ff3b192017-09-04 18:44:23 +010089 {
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +000090 build_opts.add_option("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0)));
91 if(data_type == DataType::F32)
Anthony Barbier6ff3b192017-09-04 18:44:23 +010092 {
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +000093 kernel_name = "gemm_mm_interleaved_transposed_f32_" + string_from_target(arch_target);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010094 }
95 else
96 {
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +000097 kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type));
Anthony Barbier6ff3b192017-09-04 18:44:23 +010098 }
99
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000100 // Configure kernel window
101 const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100102 constexpr unsigned int num_elems_processed_per_iteration_y = 4;
103
104 Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
105
106 AccessWindowRectangle input0_access(input0->info(), 0, 0, num_elems_processed_per_iteration_y, 1, 1.f, 0.25f);
107 AccessWindowTranspose input1_access(input1->info(), 0, 0, num_elems_processed_per_iteration_x, 1, 0.f, 0.25f);
108 AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
109
110 update_window_and_padding(win, input0_access, input1_access, output_access);
111
112 output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
113
114 ICLKernel::configure(win);
115 }
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100116 else // The input tensors have not been reshaped
117 {
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000118 build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0)));
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100119
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000120 // Special case for 1xN, 2xN, 3xN and 4xN input0 tensor. num_elems_processed_per_iteration_x is set up for the default case.
121 unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100122 const unsigned int num_elems_processed_per_iteration_y = std::min(static_cast<int>(output->info()->dimension(1)), 4);
123
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000124 // Create kernels according to the architecture, data type and input size.
125 if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100126 {
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000127 // The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
128 // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
129 // FC6 and FC7 of AlexNet and VGG-16).
130 if(input1->info()->dimension(0) <= 1000)
131 {
132 // Each work-item processes 2 elements in the X dimension.
133 num_elems_processed_per_iteration_x = 2;
134 kernel_name = "gemm_mm_floating_point_f32_bifrost_1000";
135 }
136 else
137 {
138 // Each work-item processes 4 elements in the X dimension (as in the default case).
139 num_elems_processed_per_iteration_x = 4;
140 kernel_name = "gemm_mm_floating_point_f32_bifrost";
141 }
142 // The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
143 // via exhaustive autotuning over a range of representative layer configurations.
144 _lws_hint = cl::NDRange(4);
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100145 }
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000146 else if(is_data_type_fixed_point(data_type))
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100147 {
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000148 kernel_name = "gemm_mm_" + lower_string(string_from_data_type(data_type));
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100149 }
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000150 else // (MIDGARD and F32) or (F16)
151 {
152 build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
153 kernel_name = "gemm_mm_floating_point";
154 }
155 build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=" + support::cpp11::to_string(num_elems_processed_per_iteration_y));
156 build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=" + support::cpp11::to_string(num_elems_processed_per_iteration_x));
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100157
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000158 // Configure window
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100159 Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
160
161 AccessWindowStatic input0_access(input0->info(), 0, 0, input0->info()->dimension(0), ceil_to_multiple(input0->info()->dimension(1), num_elems_processed_per_iteration_y));
162 AccessWindowStatic input1_access(input1->info(), 0, 0, ceil_to_multiple(input1->info()->dimension(0), num_elems_processed_per_iteration_x), input1->info()->dimension(1));
163 AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
164
165 update_window_and_padding(win, input0_access, input1_access, output_access);
166
167 Coordinates coord;
168 coord.set_num_dimensions(output->info()->num_dimensions());
169 output_access.set_valid_region(win, ValidRegion(coord, output->info()->tensor_shape()));
170
171 ICLKernel::configure(win);
172 }
Anton Lokhmotov3e80c7f2017-11-20 11:02:10 +0000173
174 // Create kernel
175 _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
176
177 // Set config_id for enabling LWS tuning
178 _config_id = "gemm_";
179 _config_id += (is_interleaved_transposed ? "reshaped_" : "");
180 _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
181 _config_id += "_";
182 _config_id += support::cpp11::to_string(output->info()->dimension(1));
183 _config_id += "_";
184 _config_id += support::cpp11::to_string(output->info()->dimension(0));
185 _config_id += "_";
186 _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1)));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100187}
188
189void CLGEMMMatrixMultiplyKernel::run(const Window &window, cl::CommandQueue &queue)
190{
191 ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
192 ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
193
194 Window slice = window.first_slice_window_2D();
195 Window slice_matrix_b = slice;
Gian Marco Iodiceedfa9f42017-08-15 11:45:22 +0100196
197 slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
198 slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100199
200 do
201 {
202 Window slice_b = slice;
203 // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
204 // This scenario can happen when the the matrix multiplication is used to perform a convolution operation
205 if(_input1->info()->num_dimensions() < 3)
206 {
207 slice_b = slice_matrix_b;
208 }
209
210 unsigned int idx = 0;
211 add_2D_tensor_argument(idx, _input0, slice);
212 add_2D_tensor_argument(idx, _input1, slice_b);
213 add_2D_tensor_argument(idx, _output, slice);
214 enqueue(queue, *this, slice, _lws_hint);
215 }
216 while(window.slide_window_slice_2D(slice));
217}