/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "support/ToolchainSupport.h"

#include <cstddef>
#include <cstdint>

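// This kernel adds the offset contribution of quantized (gemmlowp-style) GEMM to the S32 accumulators and fuses
// the final requantization stage. Expanding sum_k((a_ik + a_offset) * (b_kj + b_offset)) gives
//   sum_k(a_ik * b_kj) + a_offset * sum_k(b_kj) + b_offset * sum_k(a_ik) + k * a_offset * b_offset
// i.e. the raw product mm_result plus a per-column term (a_offset * vector_sum_col), a per-row term
// (b_offset * vector_sum_row) and the constant K_OFFSET = a_offset * b_offset * k. Callers typically pass the
// negated quantization offsets so that these terms correct the raw accumulators in place.
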
using namespace arm_compute;

namespace arm_compute
{
class Coordinates;
} // namespace arm_compute

namespace
{
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type == GEMMLowpOutputStageType::NONE);
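    // The fused output stage produces QASYMM8, so the clamping bounds must lie within the unsigned 8-bit range [0, 255]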
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > 255);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < 0 || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = mm_result->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row or a batch count of 1");
            }
        }
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    return Status{};
}

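// Each work-item consumes four elements along X; the access windows registered below may require extra tensor
// padding so that the vectorized loads/stores stay in bounds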
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias, ITensorInfo *output,
                                                        int32_t a_offset, int32_t b_offset)
{
    constexpr unsigned int num_elems_processed_per_iteration = 4;
    bool                   window_changed                    = false;

    // Auto initialize the output
    auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));

    AccessWindowHorizontal mm_result_access(mm_result, 0, num_elems_processed_per_iteration);
    window_changed = window_changed || update_window_and_padding(win, mm_result_access);

    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
    window_changed = window_changed || update_window_and_padding(win, output_access);

    if(a_offset != 0)
    {
        AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration);
        window_changed = window_changed || update_window_and_padding(win, vector_sum_col_access);
    }
    if(b_offset != 0)
    {
        AccessWindowStatic vector_sum_row_access(vector_sum_row, 0, 0, vector_sum_row->dimension(0), 0); // NOLINT
        window_changed = window_changed || update_window_and_padding(win, vector_sum_row_access);
    }

    if(bias != nullptr)
    {
        AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
        window_changed = window_changed || update_window_and_padding(win, bias_access);
    }

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLGEMMLowpOffsetContributionOutputStageKernel::CLGEMMLowpOffsetContributionOutputStageKernel()
    : _mm_result(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _output(nullptr)
{
}

void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                  bias != nullptr ? bias->info() : nullptr,
                                                  output->info(),
                                                  a_offset, b_offset, output_stage)); // NOLINT

    const int min = output_stage.gemmlowp_min_bound;
    const int max = output_stage.gemmlowp_max_bound;

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _mm_result      = mm_result;
    _bias           = bias;
    _output         = output;

    // Check if input is a 3D reinterpretation
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->info()->num_dimensions() > 1
                                   && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();

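    // In the 3D case (e.g. a GEMM output reinterpreted as width x height x depth for convolution), vector_sum_row
    // covers dimension(1) * dimension(2) rows of mm_result, so the OpenCL kernel needs HEIGHT_INPUT3D and
    // DEPTH_INPUT3D (set below) to map a flat row index back to (height, depth)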
    // Set the arguments to pass at compile time
    CLBuildOptions build_opts;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
        build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
    }
    // If b_offset == 0, vector_sum_row can be a nullptr
    build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
    build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
    build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(1)));
    build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(2)));
    build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
    build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
    build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multiplier));
    build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shift));
    build_opts.add_option_if((min != 0) && (min != max), "-DMIN_BOUND=" + support::cpp11::to_string(min));
    build_opts.add_option_if((max != 255) && (min != max), "-DMAX_BOUND=" + support::cpp11::to_string(max));

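    // A sketch of the per-element work done by the matching OpenCL kernel for the QUANTIZE_DOWN flavour (the
    // DOWN_FIXEDPOINT flavour replaces the integer multiply/shift with a fixed-point rounding multiply):
    //   acc = mm_result[x, y] + A_OFFSET * vector_sum_col[x] + B_OFFSET * vector_sum_row[y] + K_OFFSET
    //         (+ bias[x] if ADD_BIAS)
    //   out = convert_uchar(clamp(((acc + RESULT_OFFSET) * RESULT_MULTIPLIER) >> RESULT_SHIFT, MIN_BOUND, MAX_BOUND))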
    std::string kernel_name("gemmlowp_offset_contribution");

    // Fuse output stage
    if(output_stage.type != GEMMLowpOutputStageType::NONE)
    {
        kernel_name += "_" + string_from_gemmlowp_output_stage(output_stage.type);
    }
    else
    {
        ARM_COMPUTE_ERROR("GEMMLowpOutputStage cannot be NONE!");
    }

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(),
                                                    vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                    vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                    bias != nullptr ? bias->info() : nullptr,
                                                    output->info(),
                                                    a_offset, b_offset); // NOLINT
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure_internal(win_config.second);

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name + "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
}

Status CLGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
                                                               const ITensorInfo *output,
                                                               int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage)
{
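    // Pass clones to validate_and_configure_window() so that the auto-initialization it performs cannot modify
    // the caller's tensor infos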
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(),
                                                              vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
                                                              vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
                                                              bias != nullptr ? bias->clone().get() : nullptr,
                                                              output->clone().get(),
                                                              a_offset, b_offset)
                                .first); // NOLINT

    return Status{};
}

void CLGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

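    // Collapse any higher dimensions onto Z where possible, then enqueue the kernel once per 3D slice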
    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice     = collapsed.first_slice_window_3D();

    // Set window for vector_sum_col
    Window win_vector_sum_col = slice;
    win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

    // Set window for vector_sum_row
    Window win_vector_sum_row = slice;
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));

    Window biases_slice = slice;
    biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
    biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));

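    // Bind the kernel arguments for each slice; the reduction vectors and the bias are optional and are only
    // added when they were configured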
    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _mm_result, slice);
        add_2D_tensor_argument_if((_vector_sum_col != nullptr), idx, _vector_sum_col, win_vector_sum_col);
        add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
        add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(collapsed.slide_window_slice_3D(slice));
}