/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include "support/ToolchainSupport.h"

using namespace arm_compute;

namespace
{
// OpenCL kernel requires input width to be a power of 2 for x-axis.
constexpr unsigned int border_val = 64;

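// Validate the tensor info, the reduction axis and the requested operation before the kernel is configured.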
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::SUM_SQUARE && input->data_type() == DataType::QASYMM8, "Reduction operation not supported for QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
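    // MEAN_SUM along the x-axis (non-quantized path) divides by the width, so a zero width is rejected here.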
    ARM_COMPUTE_RETURN_ERROR_ON(op == ReductionOperation::MEAN_SUM && axis == 0 && width == 0 && input->data_type() != DataType::QASYMM8);

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
        if(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8, "Operation not supported for QASYMM8");
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        }
    }

    return Status{};
}

std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
    // Output tensor auto initialization if not yet initialized
    TensorShape output_shape{ input->tensor_shape() };
    output_shape.set(axis, 1);
    const bool is_arg_min_max   = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
    DataType   output_data_type = is_arg_min_max ? DataType::U32 : input->data_type();
    auto_init_if_empty(*output, output_shape, 1, output_data_type);

    const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
    Window             win                               = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
    bool               window_changed                    = false;

    switch(axis)
    {
        case 0:
        {
            if(is_data_type_quantized(input->data_type()))
            {
                AccessWindowHorizontal input_access(input, 0, input->dimension(0));
                AccessWindowHorizontal output_access(output, 0, 1);
                window_changed = update_window_and_padding(win, input_access, output_access);
                output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
            }
            else
            {
                const unsigned int border_width = ((input->dimension(0) % border_val) != 0) ? border_val - input->dimension(0) % border_val : 0;
                AccessWindowStatic     input_access(input, 0, 0, input->dimension(0) + border_width, 1);
                AccessWindowHorizontal output_access(output, 0, 1);
                window_changed = update_window_and_padding(win, input_access, output_access);
                output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
            }
        }
        break;
        case 1:
        case 2:
        case 3:
        {
            AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
            AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
            window_changed = update_window_and_padding(win, input_access, output_access);
            output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
        }
        break;
        default:
            ARM_COMPUTE_ERROR("Not supported");
    }

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};

    return std::make_tuple(err, win);
}
} // namespace

CLReductionOperationKernel::CLReductionOperationKernel()
    : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE), _border_size()
{
}

BorderSize CLReductionOperationKernel::border_size() const
{
    return _border_size;
}

void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op, width));

    _input          = input;
    _output         = output;
    _reduction_axis = axis;
    _op             = op;

    // Set build options
    CLBuildOptions build_opts;
    std::string    data_type_promoted = get_cl_type_from_data_type(input->info()->data_type());
    if(is_data_type_quantized(input->info()->data_type()))
    {
        data_type_promoted = "uint";
    }
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
    build_opts.add_option("-DDATA_TYPE_PROMOTED=" + data_type_promoted);
    build_opts.add_option_if(op == ReductionOperation::SUM_SQUARE, "-DSUM_SQUARE=");
    build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DMEAN");
    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
    build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");

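    // Map the reduction operation onto the kernel's -DOPERATION build option.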
    switch(op)
    {
        case ReductionOperation::SUM_SQUARE:
            build_opts.add_option("-DOPERATION=square_sum");
            break;
        case ReductionOperation::SUM:
        case ReductionOperation::MEAN_SUM:
            build_opts.add_option("-DOPERATION=sum");
            break;
        case ReductionOperation::ARG_IDX_MAX:
        case ReductionOperation::ARG_IDX_MIN:
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported reduction operation");
    }

    // Create kernel
    cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
    std::string kernel_axis_name;
    const bool  is_arg_op = (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN);
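    // Each axis maps to a dedicated OpenCL kernel variant ("x", "non_parallel_x", "y", "z" or "w").
    // For the x-axis, the parallel variant is used unless the input is quantized or an arg-min/max is requested.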
    switch(axis)
    {
        case 0:
        {
            if(!is_data_type_quantized(input->info()->data_type()) && !is_arg_op)
            {
                build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DWIDTH=" + support::cpp11::to_string(width));
                const unsigned int width_leftover = input->info()->dimension(0) % border_val;
                const unsigned int border_width   = (width_leftover != 0) ? border_val - width_leftover : 0;
                const unsigned int num_of_threads = ((input->info()->dimension(0) + border_width) / 16);
                kernel_axis_name                  = "x";

                // Set the number of work-groups based on the input size. If the input width is < 128,
                // fewer than 8 threads can be used.
                lws_hint     = cl::NDRange(std::min(8U, num_of_threads));
                _border_size = BorderSize(0, border_width, 0, 0);
            }
            else
            {
                build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
                build_opts.add_option_if_else(_input->info()->data_type() == DataType::F32, "-DCOND_DATA_TYPE=int", "-DCOND_DATA_TYPE=short");
                kernel_axis_name = "non_parallel_x";
            }
        }
        break;
        case 1:
            build_opts.add_option("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
            kernel_axis_name = "y";
            break;
        case 2:
            build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
            kernel_axis_name = "z";
            break;
        case 3:
            build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
            build_opts.add_option("-DBATCH=" + support::cpp11::to_string(input->info()->dimension(3)));
            kernel_axis_name = "w";
            break;
        default:
            ARM_COMPUTE_ERROR("Not supported");
    }
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reduction_operation_" + kernel_axis_name, build_opts.options()));

    // Configure kernel window
    auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis, op);

    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));

    ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
}

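// Static validation entry point: runs the same checks as configure() without creating the OpenCL kernel.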
Status CLReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op, width));
    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis, op)));

    return Status{};
}

void CLReductionOperationKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    const bool is_arg_op = (_op == ReductionOperation::ARG_IDX_MAX || _op == ReductionOperation::ARG_IDX_MIN);
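    // Slice the window with the dimensionality that matches the kernel variant chosen for this axis.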
    switch(_reduction_axis)
    {
        case 0:
        {
            // We use parallel reduction only for non-quantized types
            if(!is_data_type_quantized(_input->info()->data_type()) && !is_arg_op)
            {
                // Set out window
                Window out_window(window);
                out_window.set(Window::DimX, Window::Dimension(0, 0, 0));

                // Get first input and output slices
                Window in_slice  = window.first_slice_window_2D();
                Window out_slice = out_window.first_slice_window_2D();

                // Reshape window
                const unsigned int border_width = ((in_slice.x().end() % border_val) != 0) ? border_val - in_slice.x().end() % border_val : 0;
                in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), in_slice.x().end() + border_width, in_slice.x().step()));

                // Set local sums buffer
                unsigned int local_sum_size = lws_hint()[0] * _input->info()->element_size();
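                // Passing a size with a null pointer allocates __local memory for the per-work-item partial sums.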
                _kernel.setArg(num_arguments_per_2D_tensor() * 2, local_sum_size, nullptr);

                do
                {
                    unsigned int idx = 0;
                    add_2D_tensor_argument(idx, _input, in_slice);
                    add_2D_tensor_argument(idx, _output, out_slice);
                    enqueue(queue, *this, in_slice, lws_hint());
                }
                while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
            }
            else
            {
                // Get first input and output slices
                Window window_in{ window };
                window_in.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _input->info()->dimension(0)));

                Window in_slice  = window.first_slice_window_1D();
                Window out_slice = window.first_slice_window_1D();

                do
                {
                    unsigned int idx = 0;
                    add_1D_tensor_argument(idx, _input, in_slice);
                    add_1D_tensor_argument(idx, _output, out_slice);
                    enqueue(queue, *this, in_slice);
                }
                while(window_in.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(out_slice));
            }
        }
        break;
        case 1:
        {
            // Get first input and output slices
            Window window_in{ window };
            window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), _input->info()->dimension(1)));
            Window in_slice  = window_in.first_slice_window_2D();
            Window out_slice = window.first_slice_window_2D();

            do
            {
                unsigned int idx = 0;
                add_2D_tensor_argument(idx, _input, in_slice);
                add_2D_tensor_argument(idx, _output, out_slice);
                enqueue(queue, *this, in_slice);
            }
            while(window_in.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
        }
        break;
        case 2:
        {
            // Get first input and output slices
            Window window_in{ window };
            window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), _input->info()->dimension(2)));
            Window in_slice  = window_in.first_slice_window_3D();
            Window out_slice = window.first_slice_window_3D();

            do
            {
                unsigned int idx = 0;
                add_3D_tensor_argument(idx, _input, in_slice);
                add_3D_tensor_argument(idx, _output, out_slice);
                enqueue(queue, *this, in_slice);
            }
            while(window_in.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
        }
        break;
        case 3:
        {
            // Get first input and output slices
            Window window_in{ window };
            window_in.set(3, Window::Dimension(0, 1, 1));
            Window in_slice  = window_in.first_slice_window_4D();
            Window out_slice = window.first_slice_window_4D();

            do
            {
                unsigned int idx = 0;
                add_4D_tensor_argument(idx, _input, in_slice);
                add_4D_tensor_argument(idx, _output, out_slice);
                enqueue(queue, *this, in_slice);
            }
            while(window_in.slide_window_slice_4D(in_slice) && window.slide_window_slice_4D(out_slice));
        }
        break;
        default:
            ARM_COMPUTE_ERROR("Not supported");
    }
}