/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/AccessWindowTranspose.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include <set>
#include <string>

using namespace arm_compute;

CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel()
    : _input0(nullptr), _input1(nullptr), _output(nullptr)
{
}

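// Selects and builds the OpenCL kernel variant: validates the input/output tensors,
// chooses an LWS hint and kernel name for the target GPU and data type, sets the
// kernel's build options, and configures the execution window. Must be called before run().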
void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, bool is_interleaved_transposed)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
    if(!is_interleaved_transposed)
    {
        // If the inputs have not been reshaped, the number of columns of matrix A must match the number of rows of matrix B
        ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1));
    }

    _input0 = input0;
    _input1 = input1;
    _output = output;

    const DataType data_type = input0->info()->data_type();
    const int      fp_pos    = input0->info()->fixed_point_position();

    // Get the target architecture
    GPUTarget arch_target = get_arch_from_target(get_target());

    // Configure LWS hint
    if(arch_target == GPUTarget::BIFROST && input1->info()->dimension(1) == 24)
    {
        // LWS optimized for the 11x11 AlexNet convolution on Bifrost
        _lws_hint = cl::NDRange(2, 2);
    }
    else if(output->info()->dimension(1) == 196)
    {
        // LWS tuned for outputs with 196 (= 14 * 14) rows, e.g. layers producing 14x14 feature maps
        _lws_hint = cl::NDRange(1, 7);
    }
    else
    {
        // Default LWS hint
        _lws_hint = cl::NDRange(8, 8);
    }

    // Create build options
    CLBuildOptions build_opts;
    build_opts.add_option_if(is_data_type_fixed_point(data_type), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(fp_pos));

    const bool multiply_alpha = std::abs(1.0f - alpha) > 0.00001f;

    // Only define ALPHA when alpha is not 1.0f. This avoids performing unnecessary multiplications.
    if(multiply_alpha)
    {
        build_opts.add_option_if_else(is_data_type_fixed_point(data_type),
                                      "-DALPHA=" + support::cpp11::to_string((data_type == DataType::QS8 ? sqcvt_qs8_f32(alpha, fp_pos) : sqcvt_qs16_f32(alpha, fp_pos))),
                                      "-DALPHA=" + float_to_string_with_full_precision(alpha));
    }
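    // Worked example for the fixed-point path (assuming sqcvt_qs8_f32 rounds
    // alpha * 2^fixed_point_position to an integer with saturation): with QS8 data,
    // fp_pos = 5 and alpha = 0.5f, the define becomes -DALPHA=16, since 0.5 * 2^5 = 16.
    // For floating-point data, alpha is passed through at full precision instead.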

    std::string kernel_name;
    if(is_interleaved_transposed)
    {
        build_opts.add_option("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0)));
        if(data_type == DataType::F32)
        {
            kernel_name = "gemm_mm_interleaved_transposed_f32_" + string_from_target(arch_target);
        }
        else
        {
            kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type));
        }

        // Configure kernel window
        const unsigned int     num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
        constexpr unsigned int num_elems_processed_per_iteration_y = 4;

        Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));

        // The inputs are assumed to have been reshaped by CLGEMMInterleave4x4Kernel and CLGEMMTranspose1xWKernel respectively
        AccessWindowRectangle input0_access(input0->info(), 0, 0, num_elems_processed_per_iteration_y, 1, 1.f, 0.25f);
        AccessWindowTranspose input1_access(input1->info(), 0, 0, num_elems_processed_per_iteration_x, 1, 0.f, 0.25f);
        AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);

        update_window_and_padding(win, input0_access, input1_access, output_access);

        output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));

        ICLKernel::configure(win);
    }
    else // The input tensors have not been reshaped
    {
        build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0)));

        // Special case for 1xN, 2xN, 3xN and 4xN input0 tensors. num_elems_processed_per_iteration_x is set up for the default case.
        unsigned int       num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
        const unsigned int num_elems_processed_per_iteration_y = std::min(static_cast<int>(output->info()->dimension(1)), 4);

        // Create kernels according to the architecture, data type and input size
        if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
        {
            // The first kernel is optimized for the case of 1000 or fewer output elements (e.g. FC8 of AlexNet and VGG-16,
            // and FC1 of Inception v3). The second kernel is optimized for the case of more than 1000 output elements (e.g.
            // FC6 and FC7 of AlexNet and VGG-16).
            if(input1->info()->dimension(0) <= 1000)
            {
                // Each work-item processes 2 elements in the X dimension
                num_elems_processed_per_iteration_x = 2;
                kernel_name = "gemm_mm_floating_point_f32_bifrost_1000";
            }
            else
            {
                // Each work-item processes 4 elements in the X dimension (as in the default case)
                num_elems_processed_per_iteration_x = 4;
                kernel_name = "gemm_mm_floating_point_f32_bifrost";
            }
            // A work-group size equal to the Bifrost quad size has proved optimal for these kernels
            // via exhaustive autotuning over a range of representative layer configurations.
            _lws_hint = cl::NDRange(4);
        }
        else if(is_data_type_fixed_point(data_type))
        {
            kernel_name = "gemm_mm_" + lower_string(string_from_data_type(data_type));
        }
        else // (MIDGARD and F32) or (F16)
        {
            build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
            kernel_name = "gemm_mm_floating_point";
        }
        build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=" + support::cpp11::to_string(num_elems_processed_per_iteration_y));
        build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=" + support::cpp11::to_string(num_elems_processed_per_iteration_x));
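        // For instance, a non-reshaped F32 GEMM on Midgard with the default 4x4
        // processing per work-item would build with:
        // "-DCOLS_A=<K> -DDATA_TYPE=float -DNUM_ELEMS_PROCESSED_PER_THREAD_Y=4 -DNUM_ELEMS_PROCESSED_PER_THREAD_X=4"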

        // Configure window
        Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));

        AccessWindowStatic    input0_access(input0->info(), 0, 0, input0->info()->dimension(0), ceil_to_multiple(input0->info()->dimension(1), num_elems_processed_per_iteration_y));
        AccessWindowStatic    input1_access(input1->info(), 0, 0, ceil_to_multiple(input1->info()->dimension(0), num_elems_processed_per_iteration_x), input1->info()->dimension(1));
        AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);

        update_window_and_padding(win, input0_access, input1_access, output_access);

        Coordinates coord;
        coord.set_num_dimensions(output->info()->num_dimensions());
        output_access.set_valid_region(win, ValidRegion(coord, output->info()->tensor_shape()));

        ICLKernel::configure(win);
    }

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Set config_id for enabling LWS tuning
    _config_id = "gemm_";
    _config_id += (is_interleaved_transposed ? "reshaped_" : "");
    _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(0));
    _config_id += "_";
    _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1)));
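    // For example, a non-reshaped F32 GEMM with M = 64, N = 1000 and K = 4096
    // produces the config_id "gemm_f32_64_1000_4096".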
}

void CLGEMMMatrixMultiplyKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    Window slice          = window.first_slice_window_2D();
    Window slice_matrix_b = slice;

    slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
    slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));

    // Iterate over all 2D slices of the execution window, enqueueing the kernel once per slice
    do
    {
        Window slice_b = slice;
        // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2.
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation.
        if(_input1->info()->num_dimensions() < 3)
        {
            slice_b = slice_matrix_b;
        }

        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input0, slice);
        add_2D_tensor_argument(idx, _input1, slice_b);
        add_2D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice, _lws_hint);
    }
    while(window.slide_window_slice_2D(slice));
}
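
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this translation unit). This
// assumes the CLTensor/CLScheduler APIs of this Compute Library version; the
// shapes and alpha below are made-up example values:
//
//   CLScheduler::get().default_init();
//
//   CLTensor a, b, out; // a: MxK, b: KxN, out: MxN, all F32
//   a.allocator()->init(TensorInfo(TensorShape(4096U, 64U), 1, DataType::F32));
//   b.allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, DataType::F32));
//   out.allocator()->init(TensorInfo(TensorShape(1000U, 64U), 1, DataType::F32));
//
//   CLGEMMMatrixMultiplyKernel mm_kernel;
//   mm_kernel.configure(&a, &b, &out, 1.0f, false /* inputs not reshaped */);
//
//   a.allocator()->allocate();
//   b.allocator()->allocate();
//   out.allocator()->allocate();
//   // ... fill a and b ...
//
//   CLScheduler::get().enqueue(mm_kernel, true /* flush */);
// ---------------------------------------------------------------------------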
229}