/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "support/ToolchainSupport.h"

#include <cstddef>
#include <cstdint>

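// This kernel fuses the GEMMLowp offset-contribution step with the output stage: starting from the S32
// result of the quantized matrix multiplication, it folds in the contributions that depend on the operands'
// quantization offsets (via the precomputed vector_sum_col / vector_sum_row reductions and the constant
// a_offset * b_offset * k term), adds an optional bias, and quantizes the result down to QASYMM8 according
// to the given GEMMLowpOutputStageInfo.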
namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type == GEMMLowpOutputStageType::NONE);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > 255);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < 0 || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
    if(output_stage.is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_shifts->dimension(0));
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_multipliers->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
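        // (i.e. the 2D GEMM output is viewed as a 3D tensor, so its Y and Z dimensions together represent
        // the original GEMM rows and vector_sum_row must have dimension(1) * dimension(2) entries)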
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = mm_result->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row or the number of batches must be set to 1");
            }
        }
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_stage.gemmlowp_multipliers.size() != output_stage.gemmlowp_shifts.size(),
                                    "per-channel quantization info is incorrect: multipliers and shifts must have the same size");

    return Status{};
}

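// Initializes the output tensor if it is still empty and computes the execution window, registering the
// access windows (and thus the padding requirements) of every tensor the kernel touches.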
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias, ITensorInfo *output,
                                                        int32_t a_offset, int32_t b_offset, ITensorInfo *output_multipliers, ITensorInfo *output_shifts)
{
    constexpr unsigned int num_elems_processed_per_iteration = 4;
    bool window_changed = false;

    // Auto initialize the output
    auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));

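    // The kernel processes 4 elements per work-item along X; update_window_and_padding() grows the tensors'
    // padding as needed for these accesses and reports whether the execution window had to be adjusted.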
    AccessWindowHorizontal mm_result_access(mm_result, 0, num_elems_processed_per_iteration);
    window_changed = window_changed || update_window_and_padding(win, mm_result_access);

    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
    window_changed = window_changed || update_window_and_padding(win, output_access);

    if(a_offset != 0)
    {
        AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration);
        window_changed = window_changed || update_window_and_padding(win, vector_sum_col_access);
    }
    if(b_offset != 0)
    {
        AccessWindowStatic vector_sum_row_access(vector_sum_row, 0, 0, vector_sum_row->dimension(0), 0); // NOLINT
        window_changed = window_changed || update_window_and_padding(win, vector_sum_row_access);
    }

    if(bias != nullptr)
    {
        AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
        window_changed = window_changed || update_window_and_padding(win, bias_access);
    }

    if(output_multipliers->dimension(0) > 1)
    {
        AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_processed_per_iteration);
        AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_processed_per_iteration);
        window_changed = window_changed || update_window_and_padding(win, output_multipliers_access, output_shifts_access);
    }

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLGEMMLowpOffsetContributionOutputStageKernel::CLGEMMLowpOffsetContributionOutputStageKernel()
    : _mm_result(nullptr),
      _vector_sum_col(nullptr),
      _vector_sum_row(nullptr),
      _bias(nullptr),
      _output(nullptr),
      _output_multipliers(nullptr),
      _output_shifts(nullptr),
      _is_quantized_per_channel(false)
{
}

void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
                                                              const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output, output_multipliers, output_shifts);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                  bias != nullptr ? bias->info() : nullptr,
                                                  output->info(),
                                                  a_offset, b_offset, output_stage,
                                                  output_multipliers->info(), output_shifts->info())); // NOLINT

    const int min = output_stage.gemmlowp_min_bound;
    const int max = output_stage.gemmlowp_max_bound;

    _vector_sum_col           = vector_sum_col;
    _vector_sum_row           = vector_sum_row;
    _mm_result                = mm_result;
    _bias                     = bias;
    _output                   = output;
    _output_multipliers       = output_multipliers;
    _output_shifts            = output_shifts;
    _is_quantized_per_channel = output_stage.is_quantized_per_channel;

    // Check if input is a 3D reinterpretation
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->info()->num_dimensions() > 1
                                   && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();

    // Set the arguments to pass at compile time
    CLBuildOptions build_opts;

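    // A quantized GEMM with non-zero operand offsets decomposes into the raw A*B product plus terms that
    // depend only on the column sums of one operand, the row sums of the other, and the constant
    // a_offset * b_offset * k. The reductions are passed as tensors at run time; the constant is baked in
    // as K_OFFSET at compile time.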
    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
        build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
    }
    // If b_offset == 0, vector_sum_row can be a nullptr
    build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
    build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
    build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(1)));
    build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(2)));
    build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
    build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
    build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
    build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
    build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
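    // Clamping bounds are only defined when they actually restrict the full QASYMM8 range [0, 255];
    // min == max signals that no clamping was requested.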
    build_opts.add_option_if((min != 0) && (min != max), "-DMIN_BOUND=" + support::cpp11::to_string(min));
    build_opts.add_option_if((max != 255) && (min != max), "-DMAX_BOUND=" + support::cpp11::to_string(max));

    std::string kernel_name("gemmlowp_offset_contribution");

    // Fuse output stage
    if(output_stage.type != GEMMLowpOutputStageType::NONE)
    {
        kernel_name += "_" + string_from_gemmlowp_output_stage(output_stage.type);
    }
    else
    {
        ARM_COMPUTE_ERROR("GEMMLowpOutputStage cannot be NONE!");
    }

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(),
                                                    vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                    vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                    bias != nullptr ? bias->info() : nullptr,
                                                    output->info(),
                                                    a_offset, b_offset,
                                                    output_multipliers->info(), output_shifts->info()); // NOLINT
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure_internal(win_config.second);

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name + "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
}

Status CLGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
                                                               const ITensorInfo *output, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
                                                               const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage, output_multipliers, output_shifts));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(),
                                                              vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
                                                              vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
                                                              bias != nullptr ? bias->clone().get() : nullptr,
                                                              output->clone().get(),
                                                              a_offset, b_offset,
                                                              output_multipliers->clone().get(), output_shifts->clone().get())
                                .first); // NOLINT

    return Status{};
}

void CLGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

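    // Collapse all dimensions from DimZ upwards into a single dimension where possible, then enqueue the
    // kernel once per 3D slice.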
    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice     = collapsed.first_slice_window_3D();

    // Set window for vector_sum_col
    Window win_vector_sum_col = slice;
    win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

    // Set window for vector_sum_row
    Window win_vector_sum_row = slice;
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));

    Window biases_slice = slice;
    biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
    biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));

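    // The argument order below must match the OpenCL kernel signature; the optional tensors are only
    // appended when the corresponding compile-time option was defined in configure().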
    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _mm_result, slice);
        add_2D_tensor_argument_if((_vector_sum_col != nullptr), idx, _vector_sum_col, win_vector_sum_col);
        add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
        add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
        add_3D_tensor_argument(idx, _output, slice);
        add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_multipliers, biases_slice);
        add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_shifts, biases_slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute