/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include <map>

namespace arm_compute
{
namespace
{
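// Vector size: number of elements processed per iteration by the OpenCL kernels (passed as VEC_SIZE)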
constexpr unsigned int num_elems_processed_per_iteration = 16;

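// Map from each supported arithmetic operation to the OP string passed to the OpenCL kernel build options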
std::map<ArithmeticOperation, std::string> supported_arithmetic_ops =
{
    { ArithmeticOperation::ADD, "ADD" },
    { ArithmeticOperation::SUB, "SUB" },
    { ArithmeticOperation::DIV, "DIV" },
    { ArithmeticOperation::SQUARED_DIFF, "SQUARED_DIFF" },
    { ArithmeticOperation::MIN, "MIN" },
    { ArithmeticOperation::MAX, "MAX" },
    { ArithmeticOperation::POWER, "POWER" },
    { ArithmeticOperation::PRELU, "PRELU" },
};

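// Operations for which a saturating kernel variant exists (used by CLSaturatedArithmeticOperationKernel)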
std::map<ArithmeticOperation, std::string> supported_sat_arithmetic_ops =
{
    { ArithmeticOperation::ADD, "ADD" },
    { ArithmeticOperation::SUB, "SUB" },
};

std::string generate_id_for_tuning_common(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
{
    std::string config_id;
    // Set config_id for enabling LWS tuning
    config_id = kernel_name;
    config_id += "_";
    config_id += lower_string(string_from_data_type(input1.data_type()));
    config_id += "_";
    config_id += support::cpp11::to_string(output.dimension(0));
    config_id += "_";
    config_id += support::cpp11::to_string(output.dimension(1));
    return config_id;
}

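// Validation for operators that support floating-point types only (currently DIV and POWER):
// inputs and output must be F16 or F32, share the same data type and be broadcast compatible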
Status validate_arguments_with_float_only_supported_rules(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(&input1, &input2, &output);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2);

    const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
                                        "Wrong shape for output");
    }

    return Status{};
}

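// Validation for the general arithmetic operators: U8, QASYMM8, S16, F16 and F32 inputs are allowed,
// quantized inputs/outputs must share the same data type, and a U8 output requires both inputs to be U8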
Status validate_arguments_with_arithmetic_rules(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input2);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);

    const bool is_qasymm = is_data_type_quantized_asymmetric(input1.data_type()) || is_data_type_quantized_asymmetric(input2.data_type());
    if(is_qasymm)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2);
    }

    const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&output);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG((output.data_type() == DataType::U8) && ((input1.data_type() != DataType::U8) || (input2.data_type() != DataType::U8)),
                                        "Output can only be U8 if both inputs are U8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
                                        "Wrong shape for output");
        if(is_qasymm)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);
        }
    }
    return Status{};
}

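// Assemble the -D defines passed to the OpenCL compiler: input/output data types, vector size and the OP token,
// plus the per-tensor quantization offsets and scales when the inputs are QASYMM8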
CLBuildOptions generate_build_options_with_arithmetic_rules(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, const std::string &operation_string)
{
    CLBuildOptions build_opts;

    build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1.data_type()));
    build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2.data_type()));
    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output.data_type()));
    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
    build_opts.add_option("-DOP=" + operation_string);
    if(is_data_type_quantized_asymmetric(input1.data_type()))
    {
        const UniformQuantizationInfo iq1info = input1.quantization_info().uniform();
        const UniformQuantizationInfo iq2info = input2.quantization_info().uniform();
        const UniformQuantizationInfo oqinfo = output.quantization_info().uniform();

        build_opts.add_option("-DOFFSET_IN1=" + support::cpp11::to_string(iq1info.offset));
        build_opts.add_option("-DOFFSET_IN2=" + support::cpp11::to_string(iq2info.offset));
        build_opts.add_option("-DOFFSET_OUT=" + support::cpp11::to_string(oqinfo.offset));
        build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1info.scale));
        build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2info.scale));
        build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oqinfo.scale));
    }
    return build_opts;
}

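// Compute the execution window over the broadcast valid region, let size-1 dimensions of either input broadcast,
// and update the tensor access windows; reports an error if the existing padding is insufficient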
std::pair<Status, Window> configure_window_arithmetic_common(const ValidRegion &valid_region, ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
    Window win_input1 = win.broadcast_if_dimension_le_one(input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(input2);

    AccessWindowHorizontal input1_access(&input1, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal input2_access(&input2, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal output_access(&output, 0, num_elems_processed_per_iteration);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}

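// Auto-initialise the output for the general arithmetic operators: broadcast shape and a format deduced
// from the input data types (S16, F16 or F32), then configure the common window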
std::pair<Status, Window> validate_and_configure_window_for_arithmetic_operators(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
    const TensorShape &out_shape = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    set_shape_if_empty(output, out_shape);

    if(input1.data_type() == DataType::S16 || input2.data_type() == DataType::S16)
    {
        set_format_if_unknown(output, Format::S16);
    }
    else if(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16)
    {
        set_format_if_unknown(output, Format::F16);
    }
    else if(input1.data_type() == DataType::F32 || input2.data_type() == DataType::F32)
    {
        set_format_if_unknown(output, Format::F32);
    }

    return configure_window_arithmetic_common(valid_region, input1, input2, output);
}

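// Auto-initialise the output for the float-only operators: broadcast shape with input1's data type,
// then configure the common window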
std::pair<Status, Window> validate_and_configure_window_for_division(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
    const TensorShape &out_shape = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;
    auto_init_if_empty(output, out_shape, 1, input1.data_type());
    return configure_window_arithmetic_common(valid_region, input1, input2, output);
}
} // namespace

CLElementwiseOperationKernel::CLElementwiseOperationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLElementwiseOperationKernel::configure_common(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info()));

    // Configure kernel window
    auto win_config = validate_and_configure_window(*input1->info(), *input2->info(), *output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    std::string kernel_name = "elementwise_operation_" + name();
    if(is_data_type_quantized_asymmetric(input1->info()->data_type()))
    {
        kernel_name += "_quantized";
    }

    // Set kernel build options
    CLBuildOptions build_opts = generate_build_options(*input1->info(), *input2->info(), *output->info());

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    ICLKernel::configure_internal(win_config.second);

    _config_id = generate_id_for_tuning(kernel_name, *input1->info(), *output->info());
}

void CLElementwiseOperationKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const TensorShape &in_shape1 = _input1->info()->tensor_shape();
    const TensorShape &in_shape2 = _input2->info()->tensor_shape();
    const TensorShape &out_shape = _output->info()->tensor_shape();

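    // Dimensions above Z are collapsed into one slice loop where possible. When both inputs are non-vector
    // tensors with more than one element, this is only allowed if they match in every dimension from Z upwards
    // (i.e. no broadcasting in the collapsed dimensions)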
    bool can_collapse = true;
    const bool is_vector = in_shape1.num_dimensions() == 1 || in_shape2.num_dimensions() == 1;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1 && !is_vector)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }

    bool has_collapsed = false;
    Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

    do
    {
        unsigned int idx = 0;

        add_3D_tensor_argument(idx, _input1, slice_input1);
        add_3D_tensor_argument(idx, _input2, slice_input2);
        add_3D_tensor_argument(idx, _output, slice);

        enqueue(queue, *this, slice, lws_hint());

        collapsed.slide_window_slice_3D(slice_input1);
        collapsed.slide_window_slice_3D(slice_input2);
    }
    while(collapsed.slide_window_slice_3D(slice));
}

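// The right-hand border accounts for the elements replicated when one input is broadcast along dimension 0,
// capped at num_elems_processed_per_iteration - 1 elements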
BorderSize CLElementwiseOperationKernel::border_size() const
{
    const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
    const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
    return BorderSize{ 0, border, 0, 0 };
}

/** Arithmetic operations with saturation */
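// Illustrative usage of the saturated kernel (a minimal sketch, not part of the library sources: it assumes
// `input1`, `input2` and `output` are ICLTensor objects already configured and allocated by the caller):
//
//     CLSaturatedArithmeticOperationKernel add_kernel;
//     add_kernel.configure(ArithmeticOperation::ADD, &input1, &input2, &output, ConvertPolicy::SATURATE);
//     CLScheduler::get().enqueue(add_kernel);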

void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy)
{
    _policy = policy;
    _op = op;
    configure_common(input1, input2, output);
}

Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy)
{
    ARM_COMPUTE_UNUSED(op, policy);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*input1->clone(), *input2->clone(), *output->clone()).first);

    return Status{};
}

std::pair<Status, Window> CLSaturatedArithmeticOperationKernel::validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    return validate_and_configure_window_for_arithmetic_operators(input1, input2, output);
}

Status CLSaturatedArithmeticOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    return validate_arguments_with_arithmetic_rules(input1, input2, output);
}

CLBuildOptions CLSaturatedArithmeticOperationKernel::generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    const bool has_float_out = is_data_type_float(output.data_type());
    auto build_options = generate_build_options_with_arithmetic_rules(input1, input2, output, name());
    build_options.add_option((_policy == ConvertPolicy::WRAP || has_float_out) ? "-DWRAP" : "-DSATURATE");
    return build_options;
}

std::string CLSaturatedArithmeticOperationKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
{
    auto config_id = generate_id_for_tuning_common(kernel_name, input1, output);
    config_id += (_policy == ConvertPolicy::WRAP) ? "_wrap_" : "_saturate_";
    config_id += lower_string(string_from_data_layout(input1.data_layout()));
    return config_id;
}

std::string CLSaturatedArithmeticOperationKernel::name()
{
    return supported_sat_arithmetic_ops[_op];
}

/** Arithmetic operations */
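// Illustrative usage of the generic kernel (again a sketch with hypothetical, pre-allocated tensors;
// DIV takes the float-only validation path above):
//
//     CLArithmeticOperationKernel div_kernel;
//     div_kernel.configure(ArithmeticOperation::DIV, &input1, &input2, &output);
//     CLScheduler::get().enqueue(div_kernel);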
343
344void CLArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
345{
346 _op = op;
347 configure_common(input1, input2, output);
348}
349
350Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
351{
giuros01164a2722018-11-20 18:34:46 +0000352 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
Usama Arif52c54f62019-05-14 10:22:36 +0100353 if(op == ArithmeticOperation::DIV || op == ArithmeticOperation::POWER)
giuros0149f7c022018-12-03 19:25:22 +0000354 {
Usama Arif52c54f62019-05-14 10:22:36 +0100355 // Division and Power operators don't support integer arithmetic
356 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_float_only_supported_rules(*input1, *input2, *output));
giuros0149f7c022018-12-03 19:25:22 +0000357 ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_division(*input1->clone(), *input2->clone(), *output->clone()).first);
358 }
359 else
360 {
361 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
362 ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*input1->clone(), *input2->clone(), *output->clone()).first);
363 }
364
giuros01164a2722018-11-20 18:34:46 +0000365 return Status{};
366}
367std::pair<Status, Window> CLArithmeticOperationKernel::validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
368{
Usama Arif52c54f62019-05-14 10:22:36 +0100369 if(_op == ArithmeticOperation::DIV || _op == ArithmeticOperation::POWER)
giuros0149f7c022018-12-03 19:25:22 +0000370 {
Usama Arif52c54f62019-05-14 10:22:36 +0100371 // Division and Power operators don't support integer arithmetic
giuros0149f7c022018-12-03 19:25:22 +0000372 return validate_and_configure_window_for_division(input1, input2, output);
373 }
374 else
375 {
376 return validate_and_configure_window_for_arithmetic_operators(input1, input2, output);
377 }
giuros01164a2722018-11-20 18:34:46 +0000378}
379Status CLArithmeticOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
380{
Usama Arif52c54f62019-05-14 10:22:36 +0100381 if(_op == ArithmeticOperation::DIV || _op == ArithmeticOperation::POWER)
giuros0149f7c022018-12-03 19:25:22 +0000382 {
Usama Arif52c54f62019-05-14 10:22:36 +0100383 // Division and Power operators don't support integer arithmetic
384 return validate_arguments_with_float_only_supported_rules(input1, input2, output);
giuros0149f7c022018-12-03 19:25:22 +0000385 }
386 else
387 {
388 return validate_arguments_with_arithmetic_rules(input1, input2, output);
389 }
giuros01164a2722018-11-20 18:34:46 +0000390}
391
392CLBuildOptions CLArithmeticOperationKernel::generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
393{
394 return generate_build_options_with_arithmetic_rules(input1, input2, output, name());
395}
396std::string CLArithmeticOperationKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
397{
398 return generate_id_for_tuning_common(kernel_name, input1, output);
399}
400
401std::string CLArithmeticOperationKernel::name()
402{
403 return supported_arithmetic_ops[_op];
404}
405} // namespace arm_compute