/*
 * Copyright (c) 2016-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "support/ToolchainSupport.h"

#include <cmath>
#include <limits>

namespace arm_compute
{
/** Logarithm polynomial coefficients */
const std::array<float32x4_t, 8> log_tab =
{
    {
        vdupq_n_f32(-2.29561495781f),
        vdupq_n_f32(-2.47071170807f),
        vdupq_n_f32(-5.68692588806f),
        vdupq_n_f32(-0.165253549814f),
        vdupq_n_f32(5.17591238022f),
        vdupq_n_f32(0.844007015228f),
        vdupq_n_f32(4.58445882797f),
        vdupq_n_f32(0.0141278216615f),
    }
};

/** Sin polynomial coefficients */
constexpr float te_sin_coeff2 = 0.166666666666f; // 1/(2*3)
constexpr float te_sin_coeff3 = 0.05f;           // 1/(4*5)
constexpr float te_sin_coeff4 = 0.023809523810f; // 1/(6*7)
constexpr float te_sin_coeff5 = 0.013888888889f; // 1/(8*9)

#ifndef DOXYGEN_SKIP_THIS
inline float32x4_t prefer_vfmaq_f32(float32x4_t a, float32x4_t b, float32x4_t c)
{
#ifdef __aarch64__
    return vfmaq_f32(a, b, c);
#else  // __aarch64__
    return vmlaq_f32(a, b, c);
#endif // __aarch64__
}
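
// Note: on AArch64 this lowers to a fused multiply-add (vfmaq_f32, a single rounding
// step), while the AArch32 fallback vmlaq_f32 rounds the product and the sum
// separately, so the two paths can differ in the last ULP. A minimal usage sketch:
//
//   float32x4_t acc = vdupq_n_f32(1.f);
//   acc = prefer_vfmaq_f32(acc, vdupq_n_f32(2.f), vdupq_n_f32(3.f)); // acc = 1 + 2*3 = {7, 7, 7, 7}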

inline float32x4_t vfloorq_f32(float32x4_t val)
{
    static const float32x4_t CONST_1 = vdupq_n_f32(1.f);

    const int32x4_t   z = vcvtq_s32_f32(val);
    const float32x4_t r = vcvtq_f32_s32(z);

    return vbslq_f32(vcgtq_f32(r, val), vsubq_f32(r, CONST_1), r);
}

inline float32x4_t vroundq_rte_f32(float32x4_t val)
{
#ifdef __aarch64__
    return vrndnq_f32(val);
#else  // __aarch64__
    static const float32x4_t CONST_HALF_FLOAT = vdupq_n_f32(0.5f);
    static const float32x4_t CONST_1_FLOAT    = vdupq_n_f32(1.f);
    static const int32x4_t   CONST_1_INT      = vdupq_n_s32(1);
    const float32x4_t        floor_val        = vfloorq_f32(val);
    const float32x4_t        diff             = vsubq_f32(val, floor_val);
    const float32x4_t        fp32_upper_limit = vreinterpretq_f32_u32(vdupq_n_u32(0x4B000000)); // 0x4B000000 = (23U + 127U) << 23U

    /*
     * 1. Select the floor value when (diff < 0.5) || ((diff == 0.5) && (floor_val % 2 == 0)).
     *    This condition is checked by vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT), vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT), vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT), CONST_1_INT)))).
     *
     * 2. If the input value (val) is out of signed int32 range, simply use the input value as the rounded value,
     *    because:
     *    - In this case converting to int32 would saturate.
     *    - If the input float value is >= 2^23, the rounded value is exactly equal to the input value: in IEEE
     *      single-precision representation the fraction part is only 23 bits wide, so once the exponent reaches 23
     *      every digit after the decimal point has already been truncated away, and rounding has no effect.
     *    The threshold in the format |S|E(8 bits)|Fraction(23 bits)| is (23 + 127) << 23 (assuming a positive sign);
     *    127 is added because it represents the actual zero exponent in this format.
     */

    float32x4_t rounded_val = vbslq_f32(vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT),
                                                  vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT),
                                                            vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT), CONST_1_INT)))),
                                        floor_val, vaddq_f32(floor_val, CONST_1_FLOAT));

    float32x4_t result = vbslq_f32(vcgeq_f32(vabsq_f32(val), fp32_upper_limit), val, rounded_val);

    return result;
#endif // __aarch64__
}
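
// A minimal sketch of the round-to-nearest-even behaviour (ties round to the even integer):
//
//   const float in[4] = {0.5f, 1.5f, 2.5f, -0.5f};
//   float       out[4];
//   vst1q_f32(out, vroundq_rte_f32(vld1q_f32(in))); // out = {0, 2, 2, 0}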

inline float32x2_t vinvsqrt_f32(float32x2_t x)
{
    float32x2_t sqrt_reciprocal = vrsqrte_f32(x);
    sqrt_reciprocal             = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);

    return sqrt_reciprocal;
}

inline float32x4_t vinvsqrtq_f32(float32x4_t x)
{
    float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
    sqrt_reciprocal             = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);

    return sqrt_reciprocal;
}
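
// Both vinvsqrt variants refine the ~8-bit initial estimate from vrsqrte* with two
// Newton-Raphson steps: vrsqrts*(a, b) computes (3 - a * b) / 2, and each step
// roughly doubles the number of correct bits, e.g.:
//
//   float32x4_t r = vinvsqrtq_f32(vdupq_n_f32(4.f)); // each lane ~= 0.5f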

inline float32x2_t vinv_f32(float32x2_t x)
{
    float32x2_t recip = vrecpe_f32(x);
    recip             = vmul_f32(vrecps_f32(x, recip), recip);
    recip             = vmul_f32(vrecps_f32(x, recip), recip);
    return recip;
}

inline float32x4_t vinvq_f32(float32x4_t x)
{
    float32x4_t recip = vrecpeq_f32(x);
    recip             = vmulq_f32(vrecpsq_f32(x, recip), recip);
    recip             = vmulq_f32(vrecpsq_f32(x, recip), recip);
    return recip;
}

inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const std::array<float32x4_t, 8> &coeffs)
{
    float32x4_t A   = vmlaq_f32(coeffs[0], coeffs[4], x);
    float32x4_t B   = vmlaq_f32(coeffs[2], coeffs[6], x);
    float32x4_t C   = vmlaq_f32(coeffs[1], coeffs[5], x);
    float32x4_t D   = vmlaq_f32(coeffs[3], coeffs[7], x);
    float32x4_t x2  = vmulq_f32(x, x);
    float32x4_t x4  = vmulq_f32(x2, x2);
    float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
    return res;
}
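
// vtaylor_polyq_f32 evaluates a degree-7 polynomial in the Estrin-like form
// (A + B*x^2) + (C + D*x^2)*x^4, which shortens the dependency chain compared
// with Horner's scheme. Note that the coefficient table is stored in the permuted
// order this scheme consumes: coeffs[0], coeffs[4], coeffs[2], coeffs[6] multiply
// x^0..x^3, and coeffs[1], coeffs[5], coeffs[3], coeffs[7] multiply x^4..x^7.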

static const uint32_t exp_f32_coeff[] =
{
    0x3f7ffff6, // x^1: 0x1.ffffecp-1f
    0x3efffedb, // x^2: 0x1.fffdb6p-2f
    0x3e2aaf33, // x^3: 0x1.555e66p-3f
    0x3d2b9f17, // x^4: 0x1.573e2ep-5f
    0x3c072010, // x^5: 0x1.0e4020p-7f
};

inline float32x4_t vexpq_f32(float32x4_t x)
{
    const auto c1 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[0]));
    const auto c2 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[1]));
    const auto c3 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[2]));
    const auto c4 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[3]));
    const auto c5 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[4]));

    const auto shift      = vreinterpretq_f32_u32(vdupq_n_u32(0x4b00007f)); // 2^23 + 127 = 0x1.0000fep23f
    const auto inv_ln2    = vreinterpretq_f32_u32(vdupq_n_u32(0x3fb8aa3b)); // 1 / ln(2) = 0x1.715476p+0f
    const auto neg_ln2_hi = vreinterpretq_f32_u32(vdupq_n_u32(0xbf317200)); // -ln(2) from bits  -1 to -19: -0x1.62e400p-1f
    const auto neg_ln2_lo = vreinterpretq_f32_u32(vdupq_n_u32(0xb5bfbe8e)); // -ln(2) from bits -20 to -42: -0x1.7f7d1cp-20f

    const auto inf       = vdupq_n_f32(std::numeric_limits<float>::infinity());
    const auto max_input = vdupq_n_f32(88.37f);  // Approximately ln(2^127.5)
    const auto zero      = vdupq_n_f32(0.f);
    const auto min_input = vdupq_n_f32(-86.64f); // Approximately ln(2^-125)

    // Range reduction:
    //   e^x = 2^n * e^r
    // where:
    //   n = floor(x / ln(2))
    //   r = x - n * ln(2)
    //
    // By adding x / ln(2) with 2^23 + 127 (shift):
    //   * As the FP32 fraction part only has 23 bits, the addition of 2^23 + 127 forces the decimal part
    //     of x / ln(2) out of the result. The integer part of x / ln(2) (i.e. n) + 127 will occupy
    //     the whole fraction part of z in FP32 format.
    //     Subtracting 2^23 + 127 (shift) from z will result in the integer part of x / ln(2)
    //     (i.e. n) because the decimal part has been pushed out and lost.
    //   * The addition of 127 makes the FP32 fraction part of z ready to be used as the exponent
    //     in FP32 format. Left shifting z by 23 bits will result in 2^n.
    const auto z     = prefer_vfmaq_f32(shift, x, inv_ln2);
    const auto n     = z - shift;
    const auto scale = vreinterpretq_f32_u32(vreinterpretq_u32_f32(z) << 23); // 2^n

    // The calculation of n * ln(2) is done using 2 steps to achieve accuracy beyond FP32.
    // This outperforms longer Taylor series (3-4 terms) both in terms of accuracy and performance.
    const auto r_hi = prefer_vfmaq_f32(x, n, neg_ln2_hi);
    const auto r    = prefer_vfmaq_f32(r_hi, n, neg_ln2_lo);

    // Compute the truncated Taylor series of e^r.
    //   poly = scale * (1 + c1 * r + c2 * r^2 + c3 * r^3 + c4 * r^4 + c5 * r^5)
    const auto r2 = r * r;

    const auto p1     = c1 * r;
    const auto p23    = prefer_vfmaq_f32(c2, c3, r);
    const auto p45    = prefer_vfmaq_f32(c4, c5, r);
    const auto p2345  = prefer_vfmaq_f32(p23, p45, r2);
    const auto p12345 = prefer_vfmaq_f32(p1, p2345, r2);

    auto poly = prefer_vfmaq_f32(scale, p12345, scale);

    // Handle underflow and overflow.
    poly = vbslq_f32(vcltq_f32(x, min_input), zero, poly);
    poly = vbslq_f32(vcgtq_f32(x, max_input), inf, poly);

    return poly;
}
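
// A minimal usage sketch (lane values are approximate):
//
//   const float in[4] = {0.f, 1.f, -1.f, 100.f};
//   float       out[4];
//   vst1q_f32(out, vexpq_f32(vld1q_f32(in))); // out ~= {1, 2.71828, 0.36788, inf}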

#ifdef __aarch64__
inline float32x4_t verfq_f32(float32x4_t x)
{
    static const float       erffdata[4] = { 0.278393f, 0.230389f, 0.000972f, 0.078108f };
    static const float32x4_t coeffdata   = vld1q_f32(erffdata);
    static const float32x4_t onev{ vdupq_n_f32(1.0f) };

    uint32x4_t selector = vcltzq_f32(x);

    float32x4_t absx  = vabsq_f32(x);
    float32x4_t absx2 = vmulq_f32(x, x);
    float32x4_t absx3 = vmulq_f32(absx2, absx);
    float32x4_t absx4 = vmulq_f32(absx2, absx2);

    float32x4_t denom = onev;
    denom             = vfmaq_laneq_f32(denom, absx, coeffdata, 0);
    denom             = vfmaq_laneq_f32(denom, absx2, coeffdata, 1);
    denom             = vfmaq_laneq_f32(denom, absx3, coeffdata, 2);
    denom             = vfmaq_laneq_f32(denom, absx4, coeffdata, 3);

    denom = vmulq_f32(denom, denom);
    denom = vmulq_f32(denom, denom);

    float32x4_t fract = onev;
    fract             = vdivq_f32(fract, denom);

    float32x4_t result = onev;
    result             = vsubq_f32(result, fract);

    float32x4_t inverse = vnegq_f32(result);

    result = vbslq_f32(selector, inverse, result);

    return result;
}
#endif // #ifdef __aarch64__
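
// The coefficients above are those of the classic rational approximation of erf(x)
// from Abramowitz & Stegun (formula 7.1.27):
//   erf(x) ~= 1 - 1 / (1 + a1*x + a2*x^2 + a3*x^3 + a4*x^4)^4, for x >= 0,
// extended to negative inputs via erf(-x) = -erf(x); its absolute error is bounded
// by roughly 5e-4.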

inline float32x4_t vlogq_f32(float32x4_t x)
{
    static const int32x4_t   CONST_127 = vdupq_n_s32(127);           // 127
    static const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)

    // Extract exponent
    int32x4_t   m   = vsubq_s32(vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), CONST_127);
    float32x4_t val = vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));

    // Polynomial Approximation
    float32x4_t poly = vtaylor_polyq_f32(val, log_tab);

    // Reconstruct
    poly = vmlaq_f32(poly, vcvtq_f32_s32(m), CONST_LN2);

    return poly;
}
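
// vlogq_f32 splits its input into x = 2^m * val with val in [1, 2), approximates
// ln(val) with the log_tab polynomial, and reconstructs ln(x) = ln(val) + m * ln(2).
// For example, x = 8.0f gives m = 3 and val = 1.0f, so ln(8) ~= poly(1.0f) + 3 * ln(2) ~= 2.0794f.
// As with the scalar log, the result is only meaningful for x > 0.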

inline float32x4_t vtanhq_f32(float32x4_t val)
{
    static const float32x4_t CONST_1        = vdupq_n_f32(1.f);
    static const float32x4_t CONST_2        = vdupq_n_f32(2.f);
    static const float32x4_t CONST_MIN_TANH = vdupq_n_f32(-10.f);
    static const float32x4_t CONST_MAX_TANH = vdupq_n_f32(10.f);
    static const float32x4_t CONST_THR      = vdupq_n_f32(5.e-3);
    static const float32x4_t CONST_1_3      = vdupq_n_f32(0.3333333f);

    float32x4_t x = vminq_f32(vmaxq_f32(val, CONST_MIN_TANH), CONST_MAX_TANH);
    // x * (1 - x^2/3) if |x| < 5.e-3 or (exp2x - 1) / (exp2x + 1) otherwise
    float32x4_t exp2x = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vexpq_f32(vmulq_f32(CONST_2, x)), vmulq_f32(x, x));
    float32x4_t num   = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vsubq_f32(exp2x, CONST_1), vmulq_f32(CONST_1_3, exp2x));
    float32x4_t den   = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vaddq_f32(exp2x, CONST_1), vsubq_f32(CONST_1, num));
    float32x4_t tanh  = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vmulq_f32(num, vinvq_f32(den)), vmulq_f32(x, den));
    return tanh;
}
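
// For |x| <= 5e-3 the selects above collapse to the cheap Taylor form x * (1 - x^2 / 3);
// elsewhere tanh(x) = (e^(2x) - 1) / (e^(2x) + 1) is used, with the input clamped to
// [-10, 10], where tanh is already +/-1 to FP32 precision. A minimal usage sketch:
//
//   float32x4_t t = vtanhq_f32(vdupq_n_f32(0.f)); // each lane == 0.f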

inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
{
    return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
}

inline float32x4_t vsinq_f32(float32x4_t val)
{
    const float32x4_t pi_v   = vdupq_n_f32(M_PI);
    const float32x4_t pio2_v = vdupq_n_f32(M_PI / 2);
    const float32x4_t ipi_v  = vdupq_n_f32(1 / M_PI);

    // Find positive or negative
    const int32x4_t  c_v    = vabsq_s32(vcvtq_s32_f32(vmulq_f32(val, ipi_v)));
    const uint32x4_t sign_v = vcleq_f32(val, vdupq_n_f32(0));
    const uint32x4_t odd_v  = vandq_u32(vreinterpretq_u32_s32(c_v), vdupq_n_u32(1));

    uint32x4_t neg_v = veorq_u32(odd_v, sign_v);

    // Modulus a - (n * int(a*(1/n)))
    float32x4_t      ma    = vsubq_f32(vabsq_f32(val), vmulq_f32(pi_v, vcvtq_f32_s32(c_v)));
    const uint32x4_t reb_v = vcgeq_f32(ma, pio2_v);

    // Rebase a between 0 and pi/2
    ma = vbslq_f32(reb_v, vsubq_f32(pi_v, ma), ma);

    // Taylor series
    const float32x4_t ma2 = vmulq_f32(ma, ma);

    // 2nd elem: x^3 / 3!
    float32x4_t elem = vmulq_f32(vmulq_f32(ma, ma2), vdupq_n_f32(te_sin_coeff2));
    float32x4_t res  = vsubq_f32(ma, elem);

    // 3rd elem: x^5 / 5!
    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff3));
    res  = vaddq_f32(res, elem);

    // 4th elem: x^7 / 7!
    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff4));
    res  = vsubq_f32(res, elem);

    // 5th elem: x^9 / 9!
    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff5));
    res  = vaddq_f32(res, elem);

    // Change of sign
    neg_v = vshlq_n_u32(neg_v, 31);
    res   = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(res), neg_v));
    return res;
}
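
// A minimal usage sketch (values approximate; the Taylor expansion is applied after
// the argument has been reduced to [0, pi/2]):
//
//   float32x4_t s = vsinq_f32(vdupq_n_f32(3.14159265f / 6.f)); // each lane ~= 0.5f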

inline float32x2_t vsin_f32(float32x2_t val)
{
    const float32x2_t pi_v   = vdup_n_f32(M_PI);
    const float32x2_t pio2_v = vdup_n_f32(M_PI / 2);
    const float32x2_t ipi_v  = vdup_n_f32(1 / M_PI);

    // Find positive or negative
    const int32x2_t  c_v    = vabs_s32(vcvt_s32_f32(vmul_f32(val, ipi_v)));
    const uint32x2_t sign_v = vcle_f32(val, vdup_n_f32(0));
    const uint32x2_t odd_v  = vand_u32(vreinterpret_u32_s32(c_v), vdup_n_u32(1));

    uint32x2_t neg_v = veor_u32(odd_v, sign_v);

    // Modulus a - (n * int(a*(1/n)))
    float32x2_t      ma    = vsub_f32(vabs_f32(val), vmul_f32(pi_v, vcvt_f32_s32(c_v)));
    const uint32x2_t reb_v = vcge_f32(ma, pio2_v);

    // Rebase a between 0 and pi/2
    ma = vbsl_f32(reb_v, vsub_f32(pi_v, ma), ma);

    // Taylor series
    const float32x2_t ma2 = vmul_f32(ma, ma);

    // 2nd elem: x^3 / 3!
    float32x2_t elem = vmul_f32(vmul_f32(ma, ma2), vdup_n_f32(te_sin_coeff2));
    float32x2_t res  = vsub_f32(ma, elem);

    // 3rd elem: x^5 / 5!
    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff3));
    res  = vadd_f32(res, elem);

    // 4th elem: x^7 / 7!
    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff4));
    res  = vsub_f32(res, elem);

    // 5th elem: x^9 / 9!
    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff5));
    res  = vadd_f32(res, elem);

    // Change of sign
    neg_v = vshl_n_u32(neg_v, 31);
    res   = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(res), neg_v));
    return res;
}

#endif /* DOXYGEN_SKIP_THIS */

inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int32x4_t exponent)
{
    const int32x4_t shift_vec  = vnegq_s32(exponent);
    const int32x4_t fixup      = vshrq_n_s32(vandq_s32(x, shift_vec), 31);
    const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
    return vrshlq_s32(fixed_up_x, shift_vec);
}

inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent)
{
    const int32x4_t shift_vec  = vdupq_n_s32(-exponent);
    const int32x4_t fixup      = vshrq_n_s32(vandq_s32(x, shift_vec), 31);
    const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
    return vrshlq_s32(fixed_up_x, shift_vec);
}

inline int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + ((x & mask) > threshold ? 1 : 0);
}
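
// All three overloads divide by 2^exponent and round halves away from zero
// (the same behaviour as gemmlowp's RoundingDivideByPOT), e.g.:
//
//   rounding_divide_by_pow2(5, 1);  // == 3  ( 2.5 rounds to  3)
//   rounding_divide_by_pow2(-5, 1); // == -3 (-2.5 rounds to -3)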

inline float32x4x4_t convert_uint8x16_to_float32x4x4(const uint8x16_t &in)
{
    float32x4x4_t out;

    const auto tmp1 = vmovl_u8(vget_low_u8(in));
    out.val[0]      = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
    out.val[1]      = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));

    const auto tmp2 = vmovl_u8(vget_high_u8(in));
    out.val[2]      = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp2)));
    out.val[3]      = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp2)));
    return out;
}
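
// The conversion follows the usual widening ladder, 16 x U8 -> 2 x (8 x U16) ->
// 4 x (4 x U32) -> 4 x (4 x F32): each vmovl_* doubles the element width, and the
// final vcvtq_f32_u32 is exact because every 8-bit value is representable in FP32.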

inline float32x4x4_t convert_int8x16_to_float32x4x4(const int8x16_t &in)
{
    float32x4x4_t out;

    const auto tmp1 = vmovl_s8(vget_low_s8(in));
    out.val[0]      = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp1)));
    out.val[1]      = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp1)));

    const auto tmp2 = vmovl_s8(vget_high_s8(in));
    out.val[2]      = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp2)));
    out.val[3]      = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp2)));
    return out;
}

template <>
inline float32x4x4_t convert_to_float32x4x4(const uint8x16_t &in)
{
    return convert_uint8x16_to_float32x4x4(in);
}

template <>
inline float32x4x4_t convert_to_float32x4x4(const int8x16_t &in)
{
    return convert_int8x16_to_float32x4x4(in);
}

inline void convert_float32x4x3_to_uint8x8x3(const float32x4x3_t &in1, const float32x4x3_t &in2, uint8x8x3_t &out)
{
    out.val[0] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[0])),
                                         vqmovn_u32(vcvtq_u32_f32(in2.val[0]))));
    out.val[1] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[1])),
                                         vqmovn_u32(vcvtq_u32_f32(in2.val[1]))));
    out.val[2] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[2])),
                                         vqmovn_u32(vcvtq_u32_f32(in2.val[2]))));
}

inline void convert_float32x4x4_to_uint8x16(const float32x4x4_t &in, uint8x16_t &out)
{
    const auto low  = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[0])),
                                   vqmovn_u32(vcvtq_u32_f32(in.val[1])));
    const auto high = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[2])),
                                   vqmovn_u32(vcvtq_u32_f32(in.val[3])));
    out             = vcombine_u8(vqmovn_u16(low), vqmovn_u16(high));
}

inline void convert_float32x4x4_to_int8x16(const float32x4x4_t &in, int8x16_t &out)
{
    const auto low  = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[0])),
                                   vqmovn_s32(vcvtq_s32_f32(in.val[1])));
    const auto high = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[2])),
                                   vqmovn_s32(vcvtq_s32_f32(in.val[3])));
    out             = vcombine_s8(vqmovn_s16(low), vqmovn_s16(high));
}

template <>
inline uint8x16_t convert_float_to_int<float32x4x4_t, uint8x16_t>(const float32x4x4_t &in)
{
    uint8x16_t out;
    convert_float32x4x4_to_uint8x16(in, out);
    return out;
}

template <>
inline float32x4x4_t convert_int_to_float<float32x4x4_t, uint8x16_t>(const uint8x16_t &in)
{
    return convert_uint8x16_to_float32x4x4(in);
}

template <>
inline int8x16_t convert_float_to_int<float32x4x4_t, int8x16_t>(const float32x4x4_t &in)
{
    int8x16_t out;
    convert_float32x4x4_to_int8x16(in, out);
    return out;
}

template <>
inline float32x4x4_t convert_int_to_float<float32x4x4_t, int8x16_t>(const int8x16_t &in)
{
    return convert_int8x16_to_float32x4x4(in);
}

inline float vreduce(const float32x4_t &v)
{
    const float32x2_t v0    = vget_high_f32(v);
    const float32x2_t v1    = vget_low_f32(v);
    const float32x2_t v_out = vadd_f32(v0, v1);

    const float a = vget_lane_f32(v_out, 0);
    const float b = vget_lane_f32(v_out, 1);

    return a + b;
}
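
// A minimal usage sketch of the horizontal reduction:
//
//   const float in[4] = {1.f, 2.f, 3.f, 4.f};
//   const float sum   = vreduce(vld1q_f32(in)); // sum == 10.f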

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Exponent polynomial coefficients */
/** Logarithm polynomial coefficients */
#ifndef DOXYGEN_SKIP_THIS
inline float16x8_t vfloorq_f16(float16x8_t val)
{
    static const float16x8_t CONST_1 = vdupq_n_f16(1.f);

    const int16x8_t   z = vcvtq_s16_f16(val);
    const float16x8_t r = vcvtq_f16_s16(z);

    return vbslq_f16(vcgtq_f16(r, val), vsubq_f16(r, CONST_1), r);
}

inline float16x8_t vroundq_rte_f16(float16x8_t val)
{
    return vrndnq_f16(val);
}

inline float16x4_t vinvsqrt_f16(float16x4_t x)
{
    float16x4_t sqrt_reciprocal = vrsqrte_f16(x);
    sqrt_reciprocal             = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}

inline float16x8_t vinvsqrtq_f16(float16x8_t x)
{
    float16x8_t sqrt_reciprocal = vrsqrteq_f16(x);
    sqrt_reciprocal             = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}

inline float16x4_t vinv_f16(float16x4_t x)
{
    float16x4_t recip = vrecpe_f16(x);
    recip             = vmul_f16(vrecps_f16(x, recip), recip);
    recip             = vmul_f16(vrecps_f16(x, recip), recip);
    return recip;
}

inline float16x8_t vinvq_f16(float16x8_t x)
{
    float16x8_t recip = vrecpeq_f16(x);
    recip             = vmulq_f16(vrecpsq_f16(x, recip), recip);
    recip             = vmulq_f16(vrecpsq_f16(x, recip), recip);
    return recip;
}

inline float16x4_t vtanh_rational_approx_f16(float16x4_t x16)
{
    // Calculate the rational approximation part of tanh exactly on a half-register of F16 by using F32s
    // Note: doesn't handle overflows, needs truncating at |x| = 4.508
    const float32x4_t x = vcvt_f32_f16(x16);

    const float32x4_t ONE = vdupq_n_f32(1.0f);
    const float32x4_t C1  = vdupq_n_f32(0.43760237f);
    const float32x4_t C2  = vdupq_n_f32(0.104402f);
    const float32x4_t C3  = vdupq_n_f32(0.013442706f);
    const float32x4_t C4  = vdupq_n_f32(0.00073561433f);

    const float32x4_t x2 = vmulq_f32(x, x);

    // Denominator polynomial 1 + C1*x^2 + C3*x^4
    float32x4_t denom = vfmaq_f32(C1, C3, x2);
    denom             = vfmaq_f32(ONE, x2, denom);

    // Numerator polynomial x*(1 + C2*x^2 + C4*x^4)
    float32x4_t numer = vfmaq_f32(C2, C4, x2);
    numer             = vfmaq_f32(ONE, x2, numer);
    numer             = vmulq_f32(numer, x);

    return vcvt_f16_f32(vdivq_f32(numer, denom));
}

inline float16x8_t vtanhq_f16(float16x8_t x)
{
    // Split into high/low and use the rational approximation on both parts exactly
    const float16x8_t tanh = vcombine_f16(vtanh_rational_approx_f16(vget_low_f16(x)),
                                          vtanh_rational_approx_f16(vget_high_f16(x)));

    // tanh(x) == sign(x) to F16 precision for |x| >= 4.508, use the sign beyond this
    const float16x8_t ONE      = vdupq_n_f16(1.0f);
    const float16x8_t MAX_X    = vdupq_n_f16(4.508f);
    const auto        at_limit = vcageq_f16(x, MAX_X); // |x| >= 4.508
    const float16x8_t sign_x   = vbslq_f16(vclezq_f16(x), -ONE, ONE);
    return vbslq_f16(at_limit, sign_x, tanh);
}

inline float16x8_t vtaylor_polyq_f16(float16x8_t x, const std::array<float16x8_t, 8> &coeffs)
{
    const float16x8_t A   = vaddq_f16(coeffs[0], vmulq_f16(coeffs[4], x));
    const float16x8_t B   = vaddq_f16(coeffs[2], vmulq_f16(coeffs[6], x));
    const float16x8_t C   = vaddq_f16(coeffs[1], vmulq_f16(coeffs[5], x));
    const float16x8_t D   = vaddq_f16(coeffs[3], vmulq_f16(coeffs[7], x));
    const float16x8_t x2  = vmulq_f16(x, x);
    const float16x8_t x4  = vmulq_f16(x2, x2);
    const float16x8_t res = vaddq_f16(vaddq_f16(A, vmulq_f16(B, x2)), vmulq_f16(vaddq_f16(C, vmulq_f16(D, x2)), x4));
    return res;
}

inline float16x8_t vexpq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(vexpq_f32(x_low)), vcvt_f16_f32(vexpq_f32(x_high)));
    return res;
}
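
// The FP16 transcendental helpers below (and vexpq_f16 above) share one pattern: widen
// each half of the F16x8 register to F32x4, reuse the FP32 implementation, and narrow
// the results back, trading some throughput for the accuracy of the FP32 polynomials.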

#ifdef __aarch64__
inline float16x8_t verfq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(verfq_f32(x_low)), vcvt_f16_f32(verfq_f32(x_high)));
    return res;
}
#endif // #ifdef __aarch64__

inline float16x8_t vlogq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(vlogq_f32(x_low)), vcvt_f16_f32(vlogq_f32(x_high)));
    return res;
}

inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
{
    float32x4_t n0_f32   = vcvt_f32_f16(vget_low_f16(n));
    float32x4_t n1_f32   = vcvt_f32_f16(vget_high_f16(n));
    float32x4_t val0_f32 = vcvt_f32_f16(vget_low_f16(val));
    float32x4_t val1_f32 = vcvt_f32_f16(vget_high_f16(val));

    float32x4_t res0_f32 = vexpq_f32(vmulq_f32(n0_f32, vlogq_f32(val0_f32)));
    float32x4_t res1_f32 = vexpq_f32(vmulq_f32(n1_f32, vlogq_f32(val1_f32)));

    return vcombine_f16(vcvt_f16_f32(res0_f32), vcvt_f16_f32(res1_f32));
}

inline float16x8_t vsinq_f16(float16x8_t val)
{
    const float32x4_t val_high = vcvt_f32_f16(vget_high_f16(val));
    const float32x4_t val_low  = vcvt_f32_f16(vget_low_f16(val));

    const float32x4_t res_high = vsinq_f32(val_high);
    const float32x4_t res_low  = vsinq_f32(val_low);

    return vcombine_f16(vcvt_f16_f32(res_low), vcvt_f16_f32(res_high));
}

inline float16x4_t vsin_f16(float16x4_t val)
{
    const float32x4_t val_f32  = vcvt_f32_f16(val);
    const float32x2_t val_high = vget_high_f32(val_f32);
    const float32x2_t val_low  = vget_low_f32(val_f32);

    const float32x2_t res_high = vsin_f32(val_high);
    const float32x2_t res_low  = vsin_f32(val_low);

    return vcvt_f16_f32(vcombine_f32(res_low, res_high));
}

inline float16_t vreduce(const float16x8_t &v)
{
    const float16x4_t v0    = vget_high_f16(v);
    const float16x4_t v1    = vget_low_f16(v);
    const float16x4_t v_out = vadd_f16(v0, v1);

    const float16_t a = vget_lane_f16(v_out, 0);
    const float16_t b = vget_lane_f16(v_out, 1);
    const float16_t c = vget_lane_f16(v_out, 2);
    const float16_t d = vget_lane_f16(v_out, 3);

    return a + b + c + d;
}
#endif /* DOXYGEN_SKIP_THIS */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute