/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Window.h"

#include <cmath>
#include <cstdlib>
#include <set>
#include <string>

using namespace arm_compute;

namespace
{
constexpr unsigned int num_elems_processed_per_iteration = 16;

Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                          ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
    ARM_COMPUTE_UNUSED(overflow_policy);
    ARM_COMPUTE_UNUSED(rounding_policy);

    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input2);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");

    const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(output);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
                                        "Output can only be U8 if both inputs are U8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    {
        set_shape_if_empty(*output, out_shape);

        if(input1->data_type() == DataType::S16 || input2->data_type() == DataType::S16)
        {
            set_format_if_unknown(*output, Format::S16);
        }
        else if(input1->data_type() == DataType::F32 || input2->data_type() == DataType::F32)
        {
            set_format_if_unknown(*output, Format::F32);
        }
    }

    Window win        = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
    Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(*input2);

    AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLPixelWiseMultiplicationKernel::CLPixelWiseMultiplicationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
                                                ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(),
                                                  scale, overflow_policy, rounding_policy));

    // Configure kernel window
    auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    int scale_int = -1;
    // Extract sign, exponent and mantissa
    int   exponent            = 0;
    float normalized_mantissa = std::frexp(scale, &exponent);
    // Use int scaling if factor is equal to 1/2^n for 0 <= n <= 15
    // frexp returns 0.5 as mantissa, which means that the exponent will be in the range -14 <= e <= 1
    // Moreover, the exponent is non-positive for every n >= 1, since the factor is 1/2^n
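    // Worked example (illustrative): scale = 0.125f = 1/2^3 gives normalized_mantissa = 0.5f and
    // exponent = -2, so the branch below stores scale_int = std::abs(-2 - 1) = 3, i.e. the shift amount n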
    if((normalized_mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1))
    {
        // Store the positive exponent. We know that we compute 1/2^n
        // Additionally we need to subtract 1 to compensate for the fact that frexp uses a mantissa of 0.5
        scale_int = std::abs(exponent - 1);
    }

    std::string data_type;
    std::string compute_type;
    // Check whether either input is floating point
    if(is_data_type_float(input1->info()->data_type()) || is_data_type_float(input2->info()->data_type()))
    {
        scale_int    = -1;
        compute_type = (input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32) ? "float" : "half";
        data_type    = "DATA_TYPE_FLOAT";
    }
    else
    {
        if(input1->info()->data_type() == DataType::S16 || input2->info()->data_type() == DataType::S16)
        {
            compute_type = "int";
        }
        else
        {
            compute_type = "ushort";
        }
        data_type = "DATA_TYPE_INT";
    }

    const bool is_quantized = is_data_type_quantized_asymmetric(input1->info()->data_type());

    // Construct kernel name
    std::string kernel_name = "pixelwise_mul";
    if(!is_data_type_quantized(output->info()->data_type()))
    {
        kernel_name += (scale_int >= 0) ? "_int" : "_float";
    }

    // Set kernel build options
    CLBuildOptions build_opts;
    if(is_quantized)
    {
        build_opts.add_option("-DOFFSET_IN1=" + support::cpp11::to_string(input1->info()->quantization_info().offset));
        build_opts.add_option("-DOFFSET_IN2=" + support::cpp11::to_string(input2->info()->quantization_info().offset));
        build_opts.add_option("-DOFFSET_OUT=" + support::cpp11::to_string(output->info()->quantization_info().offset));
        build_opts.add_option("-DSCALE_IN1=" + support::cpp11::to_string(input1->info()->quantization_info().scale));
        build_opts.add_option("-DSCALE_IN2=" + support::cpp11::to_string(input2->info()->quantization_info().scale));
        build_opts.add_option("-DSCALE_OUT=" + support::cpp11::to_string(output->info()->quantization_info().scale));
        kernel_name += "_quantized";
    }
    else
    {
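        // WRAP/SATURATE selects the overflow behaviour, while ROUND picks the OpenCL rounding-mode
        // suffix (_rtz = toward zero, _rte = to nearest even) applied by the multiplication kernel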
        build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(output->info()->data_type()), "-DWRAP", "-DSATURATE");
        build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
        build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->info()->data_type()));
        build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->info()->data_type()));
        build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
        build_opts.add_option("-DDATA_TYPE_RES=" + compute_type);
    }

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Set scale argument
    unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the inputs and output parameters

    if(scale_int >= 0 && !is_quantized)
    {
        _kernel.setArg(idx++, scale_int);
    }
    else
    {
        _kernel.setArg(idx++, scale);
    }

    ICLKernel::configure_internal(win_config.second);
}

Status CLPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                                                 ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, scale, overflow_policy, rounding_policy));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);

    return Status{};
}

void CLPixelWiseMultiplicationKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const TensorShape &in_shape1 = _input1->info()->tensor_shape();
    const TensorShape &in_shape2 = _input2->info()->tensor_shape();
    const TensorShape &out_shape = _output->info()->tensor_shape();

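    // Collapse Z and the higher dimensions only when neither input is broadcast along them
    // (or when one input is a single element); otherwise each 3D slice needs its own input windows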
    bool can_collapse = true;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }

    bool      has_collapsed = false;
    Window    collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input1, slice_input1);
        add_3D_tensor_argument(idx, _input2, slice_input2);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice);

        collapsed.slide_window_slice_3D(slice_input1);
        collapsed.slide_window_slice_3D(slice_input2);
    }
    while(collapsed.slide_window_slice_3D(slice));
}

BorderSize CLPixelWiseMultiplicationKernel::border_size() const
{
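    // The right-hand border covers the X-extent by which the output exceeds the narrower (broadcast)
    // input, capped at num_elems_processed_per_iteration - 1 elements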
    const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
    const unsigned int border        = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
    return BorderSize(0, border, 0, 0);
}