/*
 * Copyright (c) 2016-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h"

#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/NEON/wrapper/wrapper.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"

#include <algorithm>
#include <arm_neon.h>
#include <cstdint>
#include <map>
#include <string>

using namespace arm_compute;

namespace arm_compute
{
class Coordinates;
} // namespace arm_compute

namespace
{
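/** Generic element-wise addition for two inputs and an output of the same data type.
 *
 * The is_sat template flag selects wrapping (vadd) or saturating (vqadd)
 * arithmetic at compile time, so each instantiation avoids a per-element
 * branch: add_same<uint8_t, true> is the saturating U8 kernel, while
 * add_same<float, false> serves F32 (saturation is a no-op for floats).
 */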
template <typename T, bool is_sat>
void add_same(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolicy policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    /** NEON vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear the x dimension on the execution window as we process it manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    constexpr int window_step_x         = 16 / sizeof(T);
    const auto    window_start_x        = static_cast<int>(window.x().start());
    const auto    window_end_x          = static_cast<int>(window.x().end());
    const bool    is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);

    if(is_broadcast_across_x)
    {
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        // Clear the x dimension on the execution window as we process it manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const T *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<T *>(output.ptr());

            const T    broadcast_value     = *reinterpret_cast<const T *>(broadcast_input.ptr());
            const auto broadcast_value_vec = wrapper::vdup_n(broadcast_value, ExactTagType{});

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto non_broadcast_v = wrapper::vloadq(non_broadcast_input_ptr + x);
                const auto res             = is_sat ? wrapper::vqadd(broadcast_value_vec, non_broadcast_v) : wrapper::vadd(broadcast_value_vec, non_broadcast_v);
                wrapper::vstore(output_ptr + x, res);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto non_broadcast_v = *(non_broadcast_input_ptr + x);
                *(output_ptr + x)          = is_sat ? wrapper::add_sat(broadcast_value, non_broadcast_v) : broadcast_value + non_broadcast_v;
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear the x dimension on the execution window as we process it manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const T *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const T *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<T *>(output.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto val1 = wrapper::vloadq(input1_ptr + x);
                const auto val2 = wrapper::vloadq(input2_ptr + x);
                const auto res  = is_sat ? wrapper::vqadd(val1, val2) : wrapper::vadd(val1, val2);
                wrapper::vstore(output_ptr + x, res);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto val1   = *(input1_ptr + x);
                const auto val2   = *(input2_ptr + x);
                *(output_ptr + x) = is_sat ? wrapper::add_sat(val1, val2) : val1 + val2;
            }
        },
        input1, input2, output);
    }
}

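/* The QASYMM8 kernel dequantizes both operands to float, adds, and requantizes
 * with the output's quantization info:
 *
 *   real  = (q - offset) * scale
 *   q_out = round(real_sum / scale_out) + offset_out
 *
 * Worked scalar example (illustrative values): with scale1 = scale2 = 0.5,
 * offset1 = offset2 = 0, scale_out = 1.0 and offset_out = 10, inputs a = 20 and
 * b = 30 dequantize to 10.0 and 15.0, sum to 25.0, and requantize to
 * round(25.0 / 1.0) + 10 = 35.
 */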
void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolicy policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear the x dimension on the execution window as we process it manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 16;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);

    const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = out->info()->quantization_info().uniform();

    const float32x4_t vscale1    = vdupq_n_f32(iq1_info.scale);
    const float32x4_t vscale2    = vdupq_n_f32(iq2_info.scale);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
    const int32x4_t   voffset1   = vdupq_n_s32(iq1_info.offset);
    const int32x4_t   voffset2   = vdupq_n_s32(iq2_info.offset);
    const float32x4_t voffseto   = vdupq_n_f32(oq_info.offset);

    if(is_broadcast_across_x)
    {
        const bool                    is_broadcast_input_2 = input2_win.x().step() == 0;
        Window                        broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window                        non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor                *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
        const ITensor                *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
        const UniformQuantizationInfo broadcast_qinfo      = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo  = non_broadcast_tensor->info()->quantization_info().uniform();

        // Use the quantization info that matches each operand; the two inputs may carry different scale/offset pairs
        const float32x4_t vscale_b   = vdupq_n_f32(broadcast_qinfo.scale);
        const float32x4_t vscale_nb  = vdupq_n_f32(non_broadcast_qinfo.scale);
        const int32x4_t   voffset_b  = vdupq_n_s32(broadcast_qinfo.offset);
        const int32x4_t   voffset_nb = vdupq_n_s32(non_broadcast_qinfo.offset);

        // Clear the x dimension on the execution window as we process it manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<uint8_t *>(output.ptr());

            const uint8_t    broadcast_value     = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
            const uint8x16_t broadcast_value_vec = vdupq_n_u8(broadcast_value);

            const float32x4x4_t bf =
            {
                {
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset_b)), vscale_b),
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset_b)), vscale_b),
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset_b)), vscale_b),
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset_b)), vscale_b),
                }
            };
            const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const uint8x16_t    a  = vld1q_u8(non_broadcast_input_ptr + x);
                const float32x4x4_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))), voffset_nb)), vscale_nb),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(a))))), voffset_nb)), vscale_nb),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(a))))), voffset_nb)), vscale_nb),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(a))))), voffset_nb)), vscale_nb),
                    }
                };

                const int32x4x4_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#else //__aarch64__
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
                const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
                vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
                *(output_ptr + x) = quantize_qasymm8((afs + bfs), oq_info);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear the x dimension on the execution window as we process it manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const uint8x16_t a = vld1q_u8(input1_ptr + x);
                const uint8x16_t b = vld1q_u8(input2_ptr + x);

                const float32x4x4_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1),
                    }
                };

                const float32x4x4_t bf =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2),
                    }
                };

                const int32x4x4_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#else //__aarch64__
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
                const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
                vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
                const float bfs   = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
                *(output_ptr + x) = quantize_qasymm8((afs + bfs), oq_info);
            }
        },
        input1, input2, output);
    }
}

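/* The QASYMM8_SIGNED kernel mirrors the QASYMM8 one above: the widening chain
 * becomes s8 -> s16 -> s32 (no unsigned-to-signed reinterpret is needed) and
 * the final narrowing uses vqmovn instead of vqmovun. */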
void add_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolicy policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear the x dimension on the execution window as we process it manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 16;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);

    const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = out->info()->quantization_info().uniform();

    const float32x4_t vscale1    = vdupq_n_f32(iq1_info.scale);
    const float32x4_t vscale2    = vdupq_n_f32(iq2_info.scale);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
    const int32x4_t   voffset1   = vdupq_n_s32(iq1_info.offset);
    const int32x4_t   voffset2   = vdupq_n_s32(iq2_info.offset);
    const float32x4_t voffseto   = vdupq_n_f32(oq_info.offset);

    if(is_broadcast_across_x)
    {
        const bool                    is_broadcast_input_2 = input2_win.x().step() == 0;
        Window                        broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window                        non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor                *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
        const ITensor                *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
        const UniformQuantizationInfo broadcast_qinfo      = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo  = non_broadcast_tensor->info()->quantization_info().uniform();

        // Use the quantization info that matches each operand; the two inputs may carry different scale/offset pairs
        const float32x4_t vscale_b   = vdupq_n_f32(broadcast_qinfo.scale);
        const float32x4_t vscale_nb  = vdupq_n_f32(non_broadcast_qinfo.scale);
        const int32x4_t   voffset_b  = vdupq_n_s32(broadcast_qinfo.offset);
        const int32x4_t   voffset_nb = vdupq_n_s32(non_broadcast_qinfo.offset);

        // Clear the x dimension on the execution window as we process it manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int8_t *>(output.ptr());

            const int8_t    broadcast_value     = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            const int8x16_t broadcast_value_vec = vdupq_n_s8(broadcast_value);

            const float32x4x4_t bf =
            {
                {
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(broadcast_value_vec)))), voffset_b)), vscale_b),
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(broadcast_value_vec)))), voffset_b)), vscale_b),
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(broadcast_value_vec)))), voffset_b)), vscale_b),
                    vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(broadcast_value_vec)))), voffset_b)), vscale_b),
                }
            };
            const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int8x16_t     a  = vld1q_s8(non_broadcast_input_ptr + x);
                const float32x4x4_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))), voffset_nb)), vscale_nb),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(a)))), voffset_nb)), vscale_nb),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(a)))), voffset_nb)), vscale_nb),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(a)))), voffset_nb)), vscale_nb),
                    }
                };

                const int32x4x4_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#else //__aarch64__
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
                const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
                vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
                *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), oq_info);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear the x dimension on the execution window as we process it manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int8x16_t a = vld1q_s8(input1_ptr + x);
                const int8x16_t b = vld1q_s8(input2_ptr + x);

                const float32x4x4_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1),
                    }
                };

                const float32x4x4_t bf =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(b)))), voffset2)), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(b)))), voffset2)), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(b)))), voffset2)), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(b)))), voffset2)), vscale2),
                    }
                };

                const int32x4x4_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#else //__aarch64__
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
                        vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
                const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
                vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
                const float bfs   = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
                *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), oq_info);
            }
        },
        input1, input2, output);
    }
}

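/* QSYMM16 uses symmetric quantization (zero offset), so dequantization reduces
 * to q * scale and requantization to round(real / scale_out). Worked scalar
 * example (illustrative values): with scale1 = 0.25, scale2 = 0.5 and
 * scale_out = 0.5, inputs a = 8 and b = 4 dequantize to 2.0 and 2.0, sum to
 * 4.0, and requantize to round(4.0 / 0.5) = 8. */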
void add_QSYMM16_QSYMM16_QSYMM16(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolicy policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear the x dimension on the execution window as we process it manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 8;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);

    const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = out->info()->quantization_info().uniform();

    const float32x4_t vscale1    = vdupq_n_f32(iq1_info.scale);
    const float32x4_t vscale2    = vdupq_n_f32(iq2_info.scale);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);

    if(is_broadcast_across_x)
    {
        const bool                    is_broadcast_input_2 = input2_win.x().step() == 0;
        Window                        broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window                        non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor                *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
        const ITensor                *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
        const UniformQuantizationInfo broadcast_qinfo      = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo  = non_broadcast_tensor->info()->quantization_info().uniform();

        // Use the scale that matches each operand; the two inputs may carry different scales
        const float32x4_t vscale_b  = vdupq_n_f32(broadcast_qinfo.scale);
        const float32x4_t vscale_nb = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear the x dimension on the execution window as we process it manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int16_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int16_t *>(output.ptr());

            const int16_t   broadcast_value     = *reinterpret_cast<const int16_t *>(broadcast_input.ptr());
            const int16x8_t broadcast_value_vec = vdupq_n_s16(broadcast_value);

            const float32x4x2_t bf =
            {
                {
                    vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(broadcast_value_vec))), vscale_b),
                    vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(broadcast_value_vec))), vscale_b),
                }
            };
            const float bfs = static_cast<int32_t>(broadcast_value) * broadcast_qinfo.scale;

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int16x8_t     a  = vld1q_s16(non_broadcast_input_ptr + x);
                const float32x4x2_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale_nb),
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale_nb),
                    }
                };

                const int32x4x2_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
#else //__aarch64__
                        vcvtq_s32_f32(vmulq_f32(vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmulq_f32(vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                const int16x8_t pa = vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1]));
                vst1q_s16(output_ptr + x, pa);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>(*(non_broadcast_input_ptr + x)) * non_broadcast_qinfo.scale;
                *(output_ptr + x) = quantize_qsymm16((afs + bfs), oq_info);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear the x dimension on the execution window as we process it manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int16x8_t a = vld1q_s16(input1_ptr + x);
                const int16x8_t b = vld1q_s16(input2_ptr + x);

                const float32x4x2_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale1),
                    }
                };

                const float32x4x2_t bf =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(b))), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(b))), vscale2),
                    }
                };

                const int32x4x2_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
#else //__aarch64__
                        vcvtq_s32_f32(vmulq_f32(vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmulq_f32(vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                const int16x8_t pa = vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1]));
                vst1q_s16(output_ptr + x, pa);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>((*(input1_ptr + x))) * iq1_info.scale;
                const float bfs   = static_cast<int32_t>((*(input2_ptr + x))) * iq2_info.scale;
                *(output_ptr + x) = quantize_qsymm16((afs + bfs), oq_info);
            }
        },
        input1, input2, output);
    }
}

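/* Mixed-type kernels: the U8 operand is widened to S16 (vmovl plus a signed
 * reinterpret) and the addition then runs at S16 precision, wrapping or
 * saturating according to the convert policy. */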
void add_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolicy policy, const Window &window)
{
    // Create input windows
    Window win        = window;
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear the x dimension on the execution window as we process it manually
    win.set(Window::DimX, Window::Dimension(0, 1, 1));
    input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
    input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input1(in1, input1_win);
    Iterator input2(in2, input2_win);
    Iterator output(out, win);

    const int  window_step_x  = 8;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    execute_window_loop(win, [&](const Coordinates &)
    {
        const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
        const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());

        if(policy == ConvertPolicy::WRAP)
        {
            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto vin1 = wrapper::vloadq(input1_ptr + x);
                const auto vin2 = vreinterpretq_s16_u16(wrapper::vmovl(wrapper::vload(input2_ptr + x)));
                wrapper::vstore(output_ptr + x, wrapper::vadd(vin1, vin2));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(output_ptr + x) = *(input1_ptr + x) + static_cast<int16_t>(*(input2_ptr + x));
            }
        }
        else
        {
            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto vin1 = wrapper::vloadq(input1_ptr + x);
                const auto vin2 = vreinterpretq_s16_u16(wrapper::vmovl(wrapper::vload(input2_ptr + x)));
                wrapper::vstore(output_ptr + x, wrapper::vqadd(vin1, vin2));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(output_ptr + x) = wrapper::add_sat(*(input1_ptr + x), static_cast<int16_t>(*(input2_ptr + x)));
            }
        }
    },
    input1, input2, output);
}

inline void add_U8_S16_S16(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy, const Window &window)
{
    // Addition is commutative, so swap the inputs and reuse the S16 + U8 kernel
    add_S16_U8_S16(input2, input1, output, policy, window);
}

void add_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolicy policy, const Window &window)
{
    // Create input windows
    Window win        = window;
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear the x dimension on the execution window as we process it manually
    win.set(Window::DimX, Window::Dimension(0, 1, 1));
    input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
    input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input1(in1, input1_win);
    Iterator input2(in2, input2_win);
    Iterator output(out, win);

    const int  window_step_x  = 8;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    execute_window_loop(win, [&](const Coordinates &)
    {
        const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
        const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());

        if(policy == ConvertPolicy::WRAP)
        {
            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto vin1 = vreinterpretq_s16_u16(wrapper::vmovl(wrapper::vload(input1_ptr + x)));
                const auto vin2 = vreinterpretq_s16_u16(wrapper::vmovl(wrapper::vload(input2_ptr + x)));
                wrapper::vstore(output_ptr + x, wrapper::vadd(vin1, vin2));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(output_ptr + x) = static_cast<int16_t>(*(input1_ptr + x)) + static_cast<int16_t>(*(input2_ptr + x));
            }
        }
        else
        {
            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto vin1 = vreinterpretq_s16_u16(wrapper::vmovl(wrapper::vload(input1_ptr + x)));
                const auto vin2 = vreinterpretq_s16_u16(wrapper::vmovl(wrapper::vload(input2_ptr + x)));
                wrapper::vstore(output_ptr + x, wrapper::vqadd(vin1, vin2));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(output_ptr + x) = wrapper::add_sat(static_cast<int16_t>(*(input1_ptr + x)),
                                                     static_cast<int16_t>(*(input2_ptr + x)));
            }
        }
    },
    input1, input2, output);
}

Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, ConvertPolicy policy)
{
    ARM_COMPUTE_UNUSED(policy);

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);

    const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((input1.tensor_shape().x() != input2.tensor_shape().x()) && ((input1.data_type() != input2.data_type()) || (input1.data_type() != output.data_type())
                                    || (input2.data_type() != output.data_type())),
                                    "Broadcasting across width is supported on configurations where all tensors have the same data type");

    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(
            !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::U8)
            && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
            && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
            && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
            && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
            && !(input1.data_type() == DataType::F32 && input2.data_type() == DataType::F32 && output.data_type() == DataType::F32)
            && !(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16 && output.data_type() == DataType::F16)
            && !(input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && output.data_type() == DataType::QASYMM8)
            && !(input1.data_type() == DataType::QASYMM8_SIGNED && input2.data_type() == DataType::QASYMM8_SIGNED && output.data_type() == DataType::QASYMM8_SIGNED)
            && !(input1.data_type() == DataType::QSYMM16 && input2.data_type() == DataType::QSYMM16 && output.data_type() == DataType::QSYMM16),
            "You called addition with the wrong data types");

        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
                                        "Wrong shape for output");
    }

    return Status{};
}

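/* When the caller leaves the output uninitialized, the auto-initialization
 * below infers it from the inputs, e.g. S16 + U8 yields an S16 output and
 * QASYMM8 + QASYMM8 yields QASYMM8; an explicitly configured output is instead
 * checked against the supported combinations in validate_arguments(). */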
std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    {
        set_shape_if_empty(output, out_shape);

        if(input1.data_type() == DataType::S16 || input2.data_type() == DataType::S16)
        {
            set_format_if_unknown(output, Format::S16);
        }
        else if(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16)
        {
            set_format_if_unknown(output, Format::F16);
        }
        else if(input1.data_type() == DataType::F32 || input2.data_type() == DataType::F32)
        {
            set_format_if_unknown(output, Format::F32);
        }
        else if(input1.data_type() == DataType::QASYMM8)
        {
            set_data_type_if_unknown(output, DataType::QASYMM8);
        }
        else if(input1.data_type() == DataType::QASYMM8_SIGNED)
        {
            set_data_type_if_unknown(output, DataType::QASYMM8_SIGNED);
        }
        else if(input1.data_type() == DataType::QSYMM16)
        {
            set_data_type_if_unknown(output, DataType::QSYMM16);
        }
    }

    Window win = calculate_max_window(valid_region, Steps());

    // NEArithmeticAdditionKernel doesn't need padding, so update_window_and_padding() can be skipped
    Coordinates coord;
    coord.set_num_dimensions(output.num_dimensions());
    output.set_valid_region(valid_region);
    return std::make_pair(Status{}, win);
}
} // namespace

NEArithmeticAdditionKernel::NEArithmeticAdditionKernel()
    : _func(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr), _policy()
{
}

void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info(), policy));

    // Configure kernel window
    auto win_config = validate_and_configure_window(*input1->info(), *input2->info(), *output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    static std::map<std::string, AddFunction *> map_function =
    {
        { "add_wrap_QASYMM8_QASYMM8_QASYMM8", &add_QASYMM8_QASYMM8_QASYMM8 },
        { "add_saturate_QASYMM8_QASYMM8_QASYMM8", &add_QASYMM8_QASYMM8_QASYMM8 },
        { "add_wrap_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED", &add_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED },
        { "add_saturate_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED", &add_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED },
        { "add_wrap_QSYMM16_QSYMM16_QSYMM16", &add_QSYMM16_QSYMM16_QSYMM16 },
        { "add_saturate_QSYMM16_QSYMM16_QSYMM16", &add_QSYMM16_QSYMM16_QSYMM16 },
        { "add_wrap_U8_U8_U8", &add_same<uint8_t, false> },
        { "add_saturate_U8_U8_U8", &add_same<uint8_t, true> },
        { "add_wrap_S16_U8_S16", &add_S16_U8_S16 },
        { "add_saturate_S16_U8_S16", &add_S16_U8_S16 },
        { "add_wrap_U8_S16_S16", &add_U8_S16_S16 },
        { "add_saturate_U8_S16_S16", &add_U8_S16_S16 },
        { "add_wrap_U8_U8_S16", &add_U8_U8_S16 },
        { "add_saturate_U8_U8_S16", &add_U8_U8_S16 },
        { "add_wrap_S16_S16_S16", &add_same<int16_t, false> },
        { "add_saturate_S16_S16_S16", &add_same<int16_t, true> },
        { "add_wrap_F32_F32_F32", &add_same<float, false> },
        { "add_saturate_F32_F32_F32", &add_same<float, false> },
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        { "add_wrap_F16_F16_F16", &add_same<float16_t, false> },
        { "add_saturate_F16_F16_F16", &add_same<float16_t, false> },
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    };
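    // Note: the F32 and F16 entries map to the non-saturating template for both
    // policies, since saturating arithmetic only applies to the integer kernels.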

    _input1 = input1;
    _input2 = input2;
    _output = output;
    _policy = policy;

    std::string function_to_call("add_");
    function_to_call += policy == ConvertPolicy::WRAP ? "wrap_" : "saturate_";
    function_to_call += string_from_data_type(input1->info()->data_type()) + "_";
    function_to_call += string_from_data_type(input2->info()->data_type()) + "_";
    function_to_call += string_from_data_type(output->info()->data_type());

    auto it = map_function.find(function_to_call);

    if(it != map_function.end())
    {
        _func = it->second;
    }

    INEKernel::configure(win_config.second);
}

Status NEArithmeticAdditionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);

    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output, policy));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(*input1->clone(), *input2->clone(), *output->clone()).first);

    return Status{};
}

void NEArithmeticAdditionKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    (*_func)(_input1, _input2, _output, _policy, window);
}
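
/* Minimal usage sketch (illustrative only; Tensor, TensorInfo and NEScheduler
 * come from the library's runtime layer and this snippet is not part of the
 * kernel's sources):
 *
 *   Tensor a{}, b{}, dst{};
 *   a.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 *   b.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 *   dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 *
 *   NEArithmeticAdditionKernel kernel;
 *   kernel.configure(&a, &b, &dst, ConvertPolicy::SATURATE);
 *
 *   a.allocator()->allocate();
 *   b.allocator()->allocate();
 *   dst.allocator()->allocate();
 *
 *   NEScheduler::get().schedule(&kernel, Window::DimY);
 */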