blob: 8e1ed3a2a5ce7d1d1e596d57204d9cfacebb3e51 [file] [log] [blame]
Sang-Hoon Park0d008f72020-03-13 14:56:05 +00001/*
Gunes Bayirc0713282023-09-14 15:14:48 +01002 * Copyright (c) 2020-2021, 2023 Arm Limited.
Sang-Hoon Park0d008f72020-03-13 14:56:05 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouebcebf12020-10-21 00:04:14 +010024#include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000025
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000026#include "arm_compute/core/Helpers.h"
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000027#include "arm_compute/core/TensorInfo.h"
28#include "arm_compute/core/Utils.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010029#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000030#include "arm_compute/core/Validate.h"
31#include "arm_compute/core/Window.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010032
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010033#include "src/core/CPP/Validate.h"
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010034#include "src/core/helpers/AutoConfiguration.h"
35#include "src/core/helpers/WindowHelpers.h"
36#include "src/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
Georgios Pinitasddb93bb2020-10-02 16:38:59 +010037#include "src/core/NEON/NEFixedPoint.h"
38#include "src/core/NEON/NEMath.h"
39#include "src/core/NEON/NESymm.h"
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000040
41#include <map>
42
43namespace arm_compute
44{
45namespace
46{
/** Compute fixed-point mean and variance from accumulated sum and sum of squares.
 *
 * The mean is produced in Q-format with 10 fractional bits (scaled by 1024) and
 * the variance is renormalized by 2^20 so both share the same fixed-point scale.
 *
 * @param[in] sum       Sum of the input values.
 * @param[in] sum_sq    Sum of the squared input values.
 * @param[in] num_input Number of accumulated elements (must be non-zero).
 *
 * @return Pair of (mean, variance) in fixed-point representation.
 */
inline std::pair<int64_t, int64_t> compute_mean_variance(int64_t sum, int64_t sum_sq, uint32_t num_input)
{
    const int64_t count         = static_cast<int64_t>(num_input);
    const int64_t inv_count_q20 = static_cast<int64_t>(0x100000) / count; // 2^20 / N
    const int64_t mean          = (sum * 1024) / count;                   // mean in Q10
    const int64_t variance      = (sum_sq * inv_count_q20 - mean * mean) / 0x100000;

    return {mean, variance};
}
55
/** Lane-wise multiply-accumulate with 64-bit results: (a * b) + bias.
 *
 * Each 32-bit lane of @p a and @p b is widened to 64 bit and the products are
 * computed as scalars — the product of two 32-bit values needs 64-bit storage,
 * and this path avoids depending on a vector 64-bit integer multiply (which the
 * NEON wrapper does not expose here). The widened @p bias lanes are then added
 * back in vector form.
 *
 * @param[in] a    First multiplicand (4 x int32).
 * @param[in] b    Second multiplicand (4 x int32).
 * @param[in] bias Addend (4 x int32), widened to int64 before the addition.
 *
 * @return Two int64x2 vectors holding the four 64-bit results.
 */
inline int64x2x2_t mul_add(const int32x4_t &a, const int32x4_t &b, const int32x4_t &bias)
{
    using namespace wrapper;
    // Widen each operand's low/high halves from int32 to int64.
    const int64x2_t a_low  = vmovl(vgetlow(a));
    const int64x2_t a_high = vmovl(vgethigh(a));
    const int64x2_t b_low  = vmovl(vgetlow(b));
    const int64x2_t b_high = vmovl(vgethigh(b));

    // Extract the individual 64-bit lanes for scalar multiplication.
    const int64_t a_0 = vgetlane(a_low, 0);
    const int64_t a_1 = vgetlane(a_low, 1);
    const int64_t a_2 = vgetlane(a_high, 0);
    const int64_t a_3 = vgetlane(a_high, 1);

    const int64_t b_0 = vgetlane(b_low, 0);
    const int64_t b_1 = vgetlane(b_low, 1);
    const int64_t b_2 = vgetlane(b_high, 0);
    const int64_t b_3 = vgetlane(b_high, 1);

    int64x2x2_t result;
    // Rebuild the 64-bit products as vectors, then add the widened bias.
    const int64x2_t result_0{a_0 * b_0, a_1 * b_1};
    const int64x2_t result_1{a_2 * b_2, a_3 * b_3};
    result.val[0] = vadd(vmovl(vgetlow(bias)), result_0);
    result.val[1] = vadd(vmovl(vgethigh(bias)), result_1);

    return result;
}
82} // namespace
83
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010084void NEQLSTMLayerNormalizationKernel::configure(const ITensor *input,
85 ITensor *output,
86 const ITensor *weight,
87 const ITensor *bias)
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000088{
Sang-Hoon Park9230e272020-04-18 00:46:34 +010089 ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);
90 ARM_COMPUTE_ERROR_ON(input == output);
91 ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), weight->info(), bias->info()));
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000092
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +010093 static const std::map<DataType, ComputeFuncType> fn_map = {
94 {DataType::QSYMM16, std::mem_fn(&NEQLSTMLayerNormalizationKernel::compute_qsymm16)},
Sang-Hoon Park0d008f72020-03-13 14:56:05 +000095 };
96
97 _input = input;
98 _output = output;
99 _weight = weight;
100 _bias = bias;
101 _fn = fn_map.at(_input->info()->data_type());
102
103 auto_init_if_empty(*_output->info(), *_input->info());
Sang-Hoon Park9230e272020-04-18 00:46:34 +0100104 _output->info()->set_quantization_info(compute_output_qinfo());
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000105
106 const UniformQuantizationInfo wq_info = _weight->info()->quantization_info().uniform();
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100107 const Status s = quantization::calculate_quantized_multiplier(wq_info.scale, &_output_multiplier, &_output_shift);
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000108 _output_shift *= -1;
109
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100110 if (!bool(s))
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000111 {
112 _output_multiplier = 0;
113 _output_shift = 0;
114 }
115
116 Window win = configure_window(output);
117 INEKernel::configure(win);
118}
119
120Window NEQLSTMLayerNormalizationKernel::configure_window(ITensor *target)
121{
SiCongLib88272e2021-02-24 15:40:57 +0000122 Window window = calculate_max_window(*target->info(), Steps());
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000123
124 _window_start_x = static_cast<int32_t>(window.x().start());
125 _window_end_x = static_cast<int32_t>(window.x().end());
126 _window_step_x = static_cast<int32_t>(vector_size_byte) / _output->info()->element_size();
127
128 // input and output windows will iterator over y-axis, while execute_window will handler x-axis.
129 _inout_window = window;
130 _inout_window.set(Window::DimX, Window::Dimension(0, 1, 1));
131
132 // weight and bias cannot iterator along y-axis since they are 1D.
133 _weight_window = _inout_window;
134 _weight_window.set(Window::DimY, Window::Dimension(0, 1, 1));
135
136 return window;
137}
138
/** Static validation of tensor metadata for this kernel.
 *
 * Checks data types (input/weight: QSYMM16, bias: S32), dimensionality limits,
 * and shape agreement between input/weight and weight/bias. The output is only
 * checked when it has already been initialised (total_size != 0).
 *
 * @param[in] input  Source tensor info.
 * @param[in] output Destination tensor info (may be uninitialised).
 * @param[in] weight Weight tensor info.
 * @param[in] bias   Bias tensor info.
 *
 * @return An error Status if any check fails, an empty Status otherwise.
 */
Status NEQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input,
                                                 const ITensorInfo *output,
                                                 const ITensorInfo *weight,
                                                 const ITensorInfo *bias)
{
    ARM_COMPUTE_UNUSED(output, bias, weight, input);

    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QSYMM16);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weight, 1, DataType::QSYMM16);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);

    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > max_input_dimension);
    ARM_COMPUTE_RETURN_ERROR_ON(weight->num_dimensions() > max_weight_dimension);
    ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > max_bias_dimension);

    // Row length of the input must match the 1D weight; weight and bias must match exactly.
    ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().x() != weight->tensor_shape().x());
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(weight, bias);

    if (output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
    }

    return Status{};
}
167
/** Execute the kernel (INEKernel interface).
 *
 * The x/y iteration is driven by the member windows prepared in
 * configure_window(), so @p window is only validated here, not iterated.
 */
void NEQLSTMLayerNormalizationKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(window, info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON_MSG(!_fn, "internal function is not defined for computation");

    // Dispatch to the data-type-specific implementation selected in configure().
    _fn(*this);
}
177
Sang-Hoon Park9230e272020-04-18 00:46:34 +0100178inline QuantizationInfo NEQLSTMLayerNormalizationKernel::compute_output_qinfo()
179{
Sheri Zhang3a353982020-04-21 13:10:24 +0100180 return QuantizationInfo(1.f / 4096);
Sang-Hoon Park9230e272020-04-18 00:46:34 +0100181}
182
/** Accumulate the sum and the sum of squares of one row of QSYMM16 values.
 *
 * Processes the row in vector-sized chunks and finishes the remainder with a
 * scalar loop; the loop bounds come from the window set up in configure_window().
 * Both accumulators are 64-bit to avoid overflow over long rows.
 *
 * @param[in] input_ptr Pointer to the start of the row (must not be nullptr).
 *
 * @return Pair of (sum, sum of squares) accumulated as int64.
 */
inline std::pair<int64_t, int64_t> NEQLSTMLayerNormalizationKernel::sum_qsymm16(const int16_t *input_ptr)
{
    ARM_COMPUTE_ERROR_ON(!input_ptr);

    using AccType        = int64_t;
    using InputDataType  = int16_t;

    AccType sum{0};
    AccType sum_sq{0};

    // Vectorized main loop: 8 x int16 per iteration, widened to 2 x (4 x int32).
    int32_t x = _window_start_x;
    for (; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
    {
        using namespace wrapper;
        const int16x8_t val      = vloadq(input_ptr + x);
        const int32x4_t val_low  = vmovl(vgetlow(val));
        const int32x4_t val_high = vmovl(vgethigh(val));

#if defined(__aarch64__)
        // Across-vector reduction (vaddv) is available on AArch64 only.
        sum += static_cast<AccType>(vaddv(val_low));
        sum += static_cast<AccType>(vaddv(val_high));

        sum_sq += static_cast<AccType>(vaddv(vmul(val_low, val_low)));
        sum_sq += static_cast<AccType>(vaddv(vmul(val_high, val_high)));
#else  // __aarch64__

        // only AArch64 supports vaddv
        // Fallback: pairwise widening adds (vpaddl) then extract both lanes.
        const int64x2_t pair_sum_low  = vpaddl(val_low);
        const int64x2_t pair_sum_high = vpaddl(val_high);
        const int64x2_t pair_sum      = vadd(pair_sum_low, pair_sum_high);
        sum += vgetlane(pair_sum, 0) + vgetlane(pair_sum, 1);

        const int32x4_t square_low       = vmul(val_low, val_low);
        const int32x4_t square_high      = vmul(val_high, val_high);
        const int64x2_t pair_sum_sq_low  = vpaddl(square_low);
        const int64x2_t pair_sum_sq_high = vpaddl(square_high);
        const int64x2_t pair_sum_sq      = vadd(pair_sum_sq_low, pair_sum_sq_high);
        sum_sq += vgetlane(pair_sum_sq, 0) + vgetlane(pair_sum_sq, 1);
#endif // __aarch64__
    }

    // Scalar tail for the elements that do not fill a full vector.
    for (; x < _window_end_x; ++x)
    {
        const InputDataType val = input_ptr[x];
        sum += static_cast<AccType>(val);
        // val * val fits in int after integer promotion (max 32767^2).
        sum_sq += static_cast<AccType>(val * val);
    }

    return std::make_pair(sum, sum_sq);
}
233
234inline void NEQLSTMLayerNormalizationKernel::normalize_qasymm16(const int16_t *input_ptr,
235 int16_t *output_ptr,
236 const int16_t *weight_ptr,
237 const int32_t *bias_ptr,
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100238 int32_t mean,
239 int32_t inv_std_mul,
240 int32_t inv_std_shift)
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000241{
242 using OutputDataType = int16_t;
243
244 using namespace wrapper;
245 const int32x4_t mean_vec = vdup_n(mean, wrapper::traits::vector_128_tag{});
246
247 int32_t x = _window_start_x;
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100248 for (; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000249 {
250 const int16x8_t val = vloadq(input_ptr + x);
251 int32x4x2_t shifted;
252 shifted.val[0] = vsub(vshlq_n_s32(vmovl(vgetlow(val)), 10), mean_vec);
253 shifted.val[1] = vsub(vshlq_n_s32(vmovl(vgethigh(val)), 10), mean_vec);
254
255 int32x4x2_t rescaled = multiply_by_quantized_multiplier_2row(shifted, inv_std_mul, inv_std_shift);
256
257 const int16x8_t weight_val = vloadq(weight_ptr + x);
258 const int32x4_t weight_low = vmovl(vgetlow(weight_val));
259 const int32x4_t weight_high = vmovl(vgethigh(weight_val));
260
261 const int32x4_t bias_low = vloadq(bias_ptr + x);
262 const int32x4_t bias_high = vloadq(bias_ptr + 4 + x);
263
264 int64x2x2_t result_0 = mul_add(rescaled.val[0], weight_low, bias_low);
265 int64x2x2_t result_1 = mul_add(rescaled.val[1], weight_high, bias_high);
266
267 int32x4x2_t combined;
268 combined.val[0] = vcombine(vmovn(vrshrq_n_s64(result_0.val[0], 10)), vmovn(vrshrq_n_s64(result_0.val[1], 10)));
269 combined.val[1] = vcombine(vmovn(vrshrq_n_s64(result_1.val[0], 10)), vmovn(vrshrq_n_s64(result_1.val[1], 10)));
270
271 int32x4x2_t out_val = multiply_by_quantized_multiplier_2row(combined, _output_multiplier, _output_shift + 12);
272
273 vstore(output_ptr + x, vqmovn(out_val.val[0]));
274 vstore(output_ptr + x + 4, vqmovn(out_val.val[1]));
275 }
276
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100277 for (; x < _window_end_x; ++x)
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000278 {
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100279 const auto val = static_cast<int32_t>(input_ptr[x]);
280 const int32_t shifted = (val << 10) - mean;
281 const int32_t rescaled = quantization::multiply_by_quantized_multiplier(shifted, inv_std_mul, inv_std_shift);
282 const int64_t weighted = rescaled * weight_ptr[x] + bias_ptr[x];
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000283 const auto reverse_shifted = static_cast<int32_t>((weighted + 512) >> 10);
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100284 int32_t out_val =
285 quantization::multiply_by_quantized_multiplier(reverse_shifted, _output_multiplier, _output_shift + 12);
286 out_val =
287 utility::clamp<decltype(out_val), OutputDataType>(out_val, std::numeric_limits<OutputDataType>::min());
288 output_ptr[x] = static_cast<OutputDataType>(out_val);
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000289 }
290}
291
292void NEQLSTMLayerNormalizationKernel::compute_qsymm16()
293{
294 using InputDataType = int16_t;
295 using OutputDataType = int16_t;
296 using BiasDataType = int32_t;
297 using AccType = int64_t;
298
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100299 Iterator input_iterator{_input, _inout_window};
300 Iterator output_iterator{_output, _inout_window};
301 Iterator weight_iterator{_weight, _weight_window};
302 Iterator bias_iterator{_bias, _weight_window};
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000303
304 const auto weight_ptr = reinterpret_cast<const InputDataType *>(weight_iterator.ptr());
305 const auto bias_ptr = reinterpret_cast<const BiasDataType *>(bias_iterator.ptr());
306
307 const uint32_t column_size = _input->info()->tensor_shape()[0];
308
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100309 execute_window_loop(
310 _inout_window,
311 [&, this](const Coordinates &)
312 {
313 const auto in_ptr = reinterpret_cast<const InputDataType *>(input_iterator.ptr());
314 auto out_ptr = reinterpret_cast<OutputDataType *>(output_iterator.ptr());
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000315
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100316 AccType sum{0};
317 AccType sum_sq{0};
318 std::tie(sum, sum_sq) = sum_qsymm16(in_ptr);
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000319
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100320 AccType mean{0};
321 AccType variance{0};
322 std::tie(mean, variance) = compute_mean_variance(sum, sum_sq, column_size);
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000323
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100324 int32_t stddev_invsqrt_mul{};
325 int32_t stddev_invsqrt_shift{};
326 quantization::get_invsqrt_quantized_multiplier_exp(static_cast<int32_t>(variance), -1, stddev_invsqrt_mul,
327 stddev_invsqrt_shift);
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000328
Felix Thomasmathibalanafd38f02023-09-27 17:46:17 +0100329 normalize_qasymm16(in_ptr, out_ptr, weight_ptr, bias_ptr, mean, stddev_invsqrt_mul, stddev_invsqrt_shift);
330 },
331 input_iterator, output_iterator);
Sang-Hoon Park0d008f72020-03-13 14:56:05 +0000332}
Gunes Bayirc0713282023-09-14 15:14:48 +0100333} // namespace arm_compute