/*
 * Copyright (c) 2016-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "support/ToolchainSupport.h"

#include <cmath>
#include <limits>

namespace arm_compute
{
/** Logarithm polynomial coefficients */
const std::array<float32x4_t, 8> log_tab =
{
    {
        vdupq_n_f32(-2.29561495781f),
        vdupq_n_f32(-2.47071170807f),
        vdupq_n_f32(-5.68692588806f),
        vdupq_n_f32(-0.165253549814f),
        vdupq_n_f32(5.17591238022f),
        vdupq_n_f32(0.844007015228f),
        vdupq_n_f32(4.58445882797f),
        vdupq_n_f32(0.0141278216615f),
    }
};

/** Sin polynomial coefficients */
constexpr float te_sin_coeff2 = 0.166666666666f; // 1/(2*3)
constexpr float te_sin_coeff3 = 0.05f;           // 1/(4*5)
constexpr float te_sin_coeff4 = 0.023809523810f; // 1/(6*7)
constexpr float te_sin_coeff5 = 0.013888888889f; // 1/(8*9)

#ifndef DOXYGEN_SKIP_THIS
inline float32x4_t prefer_vfmaq_f32(float32x4_t a, float32x4_t b, float32x4_t c)
{
#ifdef __aarch64__
    return vfmaq_f32(a, b, c);
#else  // __aarch64__
    return vmlaq_f32(a, b, c);
#endif // __aarch64__
}

inline float32x4_t vfloorq_f32(float32x4_t val)
{
    static const float32x4_t CONST_1 = vdupq_n_f32(1.f);

    const int32x4_t   z = vcvtq_s32_f32(val);
    const float32x4_t r = vcvtq_f32_s32(z);

    return vbslq_f32(vcgtq_f32(r, val), vsubq_f32(r, CONST_1), r);
}
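// For example (values within int32 range): vfloorq_f32({1.7f, -1.3f, 2.f, -2.f}) gives {1.f, -2.f, 2.f, -2.f};
// the convert-to-int truncates towards zero, and one is subtracted whenever that truncation rounded upwards.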

inline float32x4_t vroundq_rte_f32(float32x4_t val)
{
#ifdef __aarch64__
    return vrndnq_f32(val);
#else  // __aarch64__
    static const float32x4_t CONST_HALF_FLOAT = vdupq_n_f32(0.5f);
    static const float32x4_t CONST_1_FLOAT    = vdupq_n_f32(1.f);
    static const int32x4_t   CONST_1_INT      = vdupq_n_s32(1);
    const float32x4_t        floor_val        = vfloorq_f32(val);
    const float32x4_t        diff             = vsubq_f32(val, floor_val);

    /*
     * Select the floor value when (diff < 0.5 || (diff == 0.5 && floor_val % 2 == 0)).
     * This condition is checked by vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT), vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT), vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT), CONST_1_INT))))
     */

    return vbslq_f32(vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT), vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT), vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT), CONST_1_INT)))),
                     floor_val, vaddq_f32(floor_val, CONST_1_FLOAT));
#endif // __aarch64__
}
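// Round-to-nearest-even examples: vroundq_rte_f32({1.5f, 2.5f, -1.5f, 1.2f}) gives {2.f, 2.f, -2.f, 1.f};
// ties (fractional part exactly 0.5) are resolved towards the even neighbour on both code paths.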

inline float32x2_t vinvsqrt_f32(float32x2_t x)
{
    float32x2_t sqrt_reciprocal = vrsqrte_f32(x);
    sqrt_reciprocal             = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);

    return sqrt_reciprocal;
}

inline float32x4_t vinvsqrtq_f32(float32x4_t x)
{
    float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
    sqrt_reciprocal             = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);

    return sqrt_reciprocal;
}
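// Both helpers start from the hardware reciprocal-square-root estimate (roughly 8 significant bits) and apply
// two Newton-Raphson refinement steps via vrsqrts_f32 / vrsqrtsq_f32, which brings the result close to full
// F32 precision. A rough usage sketch: a vector square root can then be formed as
// vmulq_f32(x, vinvsqrtq_f32(x)) for strictly positive x.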

inline float32x2_t vinv_f32(float32x2_t x)
{
    float32x2_t recip = vrecpe_f32(x);
    recip             = vmul_f32(vrecps_f32(x, recip), recip);
    recip             = vmul_f32(vrecps_f32(x, recip), recip);
    return recip;
}

inline float32x4_t vinvq_f32(float32x4_t x)
{
    float32x4_t recip = vrecpeq_f32(x);
    recip             = vmulq_f32(vrecpsq_f32(x, recip), recip);
    recip             = vmulq_f32(vrecpsq_f32(x, recip), recip);
    return recip;
}

inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const std::array<float32x4_t, 8> &coeffs)
{
    float32x4_t A   = vmlaq_f32(coeffs[0], coeffs[4], x);
    float32x4_t B   = vmlaq_f32(coeffs[2], coeffs[6], x);
    float32x4_t C   = vmlaq_f32(coeffs[1], coeffs[5], x);
    float32x4_t D   = vmlaq_f32(coeffs[3], coeffs[7], x);
    float32x4_t x2  = vmulq_f32(x, x);
    float32x4_t x4  = vmulq_f32(x2, x2);
    float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
    return res;
}
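// vtaylor_polyq_f32 evaluates a degree-7 polynomial with an Estrin-like scheme, i.e.
//   (A + B*x^2) + (C + D*x^2)*x^4
// which expands to
//   coeffs[0] + coeffs[4]*x + coeffs[2]*x^2 + coeffs[6]*x^3 + coeffs[1]*x^4 + coeffs[5]*x^5 + coeffs[3]*x^6 + coeffs[7]*x^7
// so coefficient tables such as log_tab above store their terms in this interleaved order.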

static const uint32_t exp_f32_coeff[] = {
    0x3f7ffff6, // x^1: 0x1.ffffecp-1f
    0x3efffedb, // x^2: 0x1.fffdb6p-2f
    0x3e2aaf33, // x^3: 0x1.555e66p-3f
    0x3d2b9f17, // x^4: 0x1.573e2ep-5f
    0x3c072010, // x^5: 0x1.0e4020p-7f
};

inline float32x4_t vexpq_f32(float32x4_t x)
{
    const auto c1 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[0]));
    const auto c2 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[1]));
    const auto c3 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[2]));
    const auto c4 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[3]));
    const auto c5 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[4]));

    const auto shift      = vreinterpretq_f32_u32(vdupq_n_u32(0x4b00007f)); // 2^23 + 127 = 0x1.0000fep23f
    const auto inv_ln2    = vreinterpretq_f32_u32(vdupq_n_u32(0x3fb8aa3b)); // 1 / ln(2) = 0x1.715476p+0f
    const auto neg_ln2_hi = vreinterpretq_f32_u32(vdupq_n_u32(0xbf317200)); // -ln(2) from bits  -1 to -19: -0x1.62e400p-1f
    const auto neg_ln2_lo = vreinterpretq_f32_u32(vdupq_n_u32(0xb5bfbe8e)); // -ln(2) from bits -20 to -42: -0x1.7f7d1cp-20f

    const auto inf       = vdupq_n_f32(std::numeric_limits<float>::infinity());
    const auto max_input = vdupq_n_f32(88.7f); // Approximately ln(0x1.fffffep+127)
    const auto zero      = vdupq_n_f32(0.f);
    const auto min_input = vdupq_n_f32(-86.6f); // Approximately ln(2^-125)

    // Range reduction:
    //   e^x = 2^n * e^r
    // where:
    //   n = floor(x / ln(2))
    //   r = x - n * ln(2)
    //
    // By adding 2^23 + 127 (shift) to x / ln(2):
    // * As the FP32 fraction part only has 23 bits, the addition of 2^23 + 127 forces the decimal part
    //   of x / ln(2) out of the result. The integer part of x / ln(2) (i.e. n) + 127 will occupy
    //   the whole fraction part of z in FP32 format.
    //   Subtracting 2^23 + 127 (shift) from z will result in the integer part of x / ln(2)
    //   (i.e. n) because the decimal part has been pushed out and lost.
    // * The addition of 127 makes the FP32 fraction part of z ready to be used as the exponent
    //   in FP32 format. Left shifting z by 23 bits will result in 2^n.
    const auto z     = prefer_vfmaq_f32(shift, x, inv_ln2);
    const auto n     = z - shift;
    const auto scale = vreinterpretq_f32_u32(vreinterpretq_u32_f32(z) << 23); // 2^n

    // The calculation of n * ln(2) is done in two steps to achieve accuracy beyond FP32.
    // This outperforms a longer Taylor series (3-4 terms) both in terms of accuracy and performance.
    const auto r_hi = prefer_vfmaq_f32(x, n, neg_ln2_hi);
    const auto r    = prefer_vfmaq_f32(r_hi, n, neg_ln2_lo);

    // Compute the truncated Taylor series of e^r.
    //   poly = scale * (1 + c1 * r + c2 * r^2 + c3 * r^3 + c4 * r^4 + c5 * r^5)
    const auto r2 = r * r;

    const auto p1     = c1 * r;
    const auto p23    = prefer_vfmaq_f32(c2, c3, r);
    const auto p45    = prefer_vfmaq_f32(c4, c5, r);
    const auto p2345  = prefer_vfmaq_f32(p23, p45, r2);
    const auto p12345 = prefer_vfmaq_f32(p1, p2345, r2);

    auto poly = prefer_vfmaq_f32(scale, p12345, scale);

    // Handle underflow and overflow.
    poly = vbslq_f32(vcltq_f32(x, min_input), zero, poly);
    poly = vbslq_f32(vcgtq_f32(x, max_input), inf, poly);

    return poly;
}
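// Worked example of the shift trick: for x = 1.0f, z = 2^23 + 127 + 1/ln(2) rounds to the integer 2^23 + 128,
// so n = z - shift = 1, and shifting the bit pattern of z left by 23 yields the FP32 encoding of 2^n = 2.0f;
// the residual r = 1 - 1 * ln(2) ~= 0.3069 is then fed to the degree-5 polynomial above.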

#ifdef __aarch64__
inline float32x4_t verfq_f32(float32x4_t x)
{
    static const float       erffdata[4] = { 0.278393f, 0.230389f, 0.000972f, 0.078108f };
    static const float32x4_t coeffdata   = vld1q_f32(erffdata);
    static const float32x4_t onev{ vdupq_n_f32(1.0f) };

    uint32x4_t selector = vcltzq_f32(x);

    float32x4_t absx  = vabsq_f32(x);
    float32x4_t absx2 = vmulq_f32(x, x);
    float32x4_t absx3 = vmulq_f32(absx2, absx);
    float32x4_t absx4 = vmulq_f32(absx2, absx2);

    float32x4_t denom = onev;
    denom             = vfmaq_laneq_f32(denom, absx, coeffdata, 0);
    denom             = vfmaq_laneq_f32(denom, absx2, coeffdata, 1);
    denom             = vfmaq_laneq_f32(denom, absx3, coeffdata, 2);
    denom             = vfmaq_laneq_f32(denom, absx4, coeffdata, 3);

    denom = vmulq_f32(denom, denom);
    denom = vmulq_f32(denom, denom);

    float32x4_t fract = onev;
    fract             = vdivq_f32(fract, denom);

    float32x4_t result = onev;
    result             = vsubq_f32(result, fract);

    float32x4_t inverse = vnegq_f32(result);

    result = vbslq_f32(selector, inverse, result);

    return result;
}
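// The coefficients above match one of the classic Abramowitz & Stegun rational approximations of the error
// function: erf(x) ~= 1 - 1 / (1 + a1*x + a2*x^2 + a3*x^3 + a4*x^4)^4 for x >= 0, with an absolute error on
// the order of 5e-4; negative inputs use the odd symmetry erf(-x) = -erf(x) via the final select.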
#endif // #ifdef __aarch64__

inline float32x4_t vlogq_f32(float32x4_t x)
{
    static const int32x4_t   CONST_127 = vdupq_n_s32(127);           // 127
    static const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)

    // Extract exponent
    int32x4_t   m   = vsubq_s32(vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), CONST_127);
    float32x4_t val = vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));

    // Polynomial Approximation
    float32x4_t poly = vtaylor_polyq_f32(val, log_tab);

    // Reconstruct
    poly = vmlaq_f32(poly, vcvtq_f32_s32(m), CONST_LN2);

    return poly;
}
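// vlogq_f32 factors x = 2^m * val with val in [1, 2), approximates ln(val) with the log_tab polynomial and
// reconstructs ln(x) = m * ln(2) + ln(val). For example, x = 8.0f gives m = 3, val = 1.0f and a result of
// approximately 3 * ln(2) = ln(8). Note the bit manipulation assumes a positive, normal input.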

inline float32x4_t vtanhq_f32(float32x4_t val)
{
    static const float32x4_t CONST_1        = vdupq_n_f32(1.f);
    static const float32x4_t CONST_2        = vdupq_n_f32(2.f);
    static const float32x4_t CONST_MIN_TANH = vdupq_n_f32(-10.f);
    static const float32x4_t CONST_MAX_TANH = vdupq_n_f32(10.f);
    static const float32x4_t CONST_THR      = vdupq_n_f32(5.e-3);
    static const float32x4_t CONST_1_3      = vdupq_n_f32(0.3333333f);

    float32x4_t x = vminq_f32(vmaxq_f32(val, CONST_MIN_TANH), CONST_MAX_TANH);
    // x * (1 - x^2/3) if |x| < 5.e-3 or (exp2x - 1) / (exp2x + 1) otherwise
    float32x4_t exp2x = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vexpq_f32(vmulq_f32(CONST_2, x)), vmulq_f32(x, x));
    float32x4_t num   = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vsubq_f32(exp2x, CONST_1), vmulq_f32(CONST_1_3, exp2x));
    float32x4_t den   = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vaddq_f32(exp2x, CONST_1), vsubq_f32(CONST_1, num));
    float32x4_t tanh  = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vmulq_f32(num, vinvq_f32(den)), vmulq_f32(x, den));
    return tanh;
}

inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
{
    return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
}
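// vpowq_f32 computes val^n as exp(n * ln(val)), so it inherits the domain of vlogq_f32 and is only meaningful
// for positive bases. A rough usage sketch: a per-lane cube can be taken as vpowq_f32(v, vdupq_n_f32(3.f))
// for v > 0, at the cost of one exp and one log evaluation.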

inline float32x4_t vsinq_f32(float32x4_t val)
{
    const float32x4_t pi_v   = vdupq_n_f32(M_PI);
    const float32x4_t pio2_v = vdupq_n_f32(M_PI / 2);
    const float32x4_t ipi_v  = vdupq_n_f32(1 / M_PI);

    //Find positive or negative
    const int32x4_t  c_v    = vabsq_s32(vcvtq_s32_f32(vmulq_f32(val, ipi_v)));
    const uint32x4_t sign_v = vcleq_f32(val, vdupq_n_f32(0));
    const uint32x4_t odd_v  = vandq_u32(vreinterpretq_u32_s32(c_v), vdupq_n_u32(1));

    uint32x4_t neg_v = veorq_u32(odd_v, sign_v);

    //Modulus a - (n * int(a*(1/n)))
    float32x4_t      ma    = vsubq_f32(vabsq_f32(val), vmulq_f32(pi_v, vcvtq_f32_s32(c_v)));
    const uint32x4_t reb_v = vcgeq_f32(ma, pio2_v);

    //Rebase a between 0 and pi/2
    ma = vbslq_f32(reb_v, vsubq_f32(pi_v, ma), ma);

    //Taylor series
    const float32x4_t ma2 = vmulq_f32(ma, ma);

    //2nd elem: x^3 / 3!
    float32x4_t elem = vmulq_f32(vmulq_f32(ma, ma2), vdupq_n_f32(te_sin_coeff2));
    float32x4_t res  = vsubq_f32(ma, elem);

    //3rd elem: x^5 / 5!
    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff3));
    res  = vaddq_f32(res, elem);

    //4th elem: x^7 / 7!
    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff4));
    res  = vsubq_f32(res, elem);

    //5th elem: x^9 / 9!
    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff5));
    res  = vaddq_f32(res, elem);

    //Change of sign
    neg_v = vshlq_n_u32(neg_v, 31);
    res   = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(res), neg_v));
    return res;
}

inline float32x2_t vsin_f32(float32x2_t val)
{
    const float32x2_t pi_v   = vdup_n_f32(M_PI);
    const float32x2_t pio2_v = vdup_n_f32(M_PI / 2);
    const float32x2_t ipi_v  = vdup_n_f32(1 / M_PI);

    //Find positive or negative
    const int32x2_t  c_v    = vabs_s32(vcvt_s32_f32(vmul_f32(val, ipi_v)));
    const uint32x2_t sign_v = vcle_f32(val, vdup_n_f32(0));
    const uint32x2_t odd_v  = vand_u32(vreinterpret_u32_s32(c_v), vdup_n_u32(1));

    uint32x2_t neg_v = veor_u32(odd_v, sign_v);

    //Modulus a - (n * int(a*(1/n)))
    float32x2_t      ma    = vsub_f32(vabs_f32(val), vmul_f32(pi_v, vcvt_f32_s32(c_v)));
    const uint32x2_t reb_v = vcge_f32(ma, pio2_v);

    //Rebase a between 0 and pi/2
    ma = vbsl_f32(reb_v, vsub_f32(pi_v, ma), ma);

    //Taylor series
    const float32x2_t ma2 = vmul_f32(ma, ma);

    //2nd elem: x^3 / 3!
    float32x2_t elem = vmul_f32(vmul_f32(ma, ma2), vdup_n_f32(te_sin_coeff2));
    float32x2_t res  = vsub_f32(ma, elem);

    //3rd elem: x^5 / 5!
    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff3));
    res  = vadd_f32(res, elem);

    //4th elem: x^7 / 7!
    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff4));
    res  = vsub_f32(res, elem);

    //5th elem: x^9 / 9!
    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff5));
    res  = vadd_f32(res, elem);

    //Change of sign
    neg_v = vshl_n_u32(neg_v, 31);
    res   = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(res), neg_v));
    return res;
}

#endif /* DOXYGEN_SKIP_THIS */

inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int32x4_t exponent)
{
    const int32x4_t shift_vec  = vnegq_s32(exponent);
    const int32x4_t fixup      = vshrq_n_s32(vandq_s32(x, shift_vec), 31);
    const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
    return vrshlq_s32(fixed_up_x, shift_vec);
}

inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent)
{
    const int32x4_t shift_vec  = vdupq_n_s32(-exponent);
    const int32x4_t fixup      = vshrq_n_s32(vandq_s32(x, shift_vec), 31);
    const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
    return vrshlq_s32(fixed_up_x, shift_vec);
}

inline int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + ((x & mask) > threshold ? 1 : 0);
}
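// All three overloads implement division by 2^exponent with round-to-nearest, ties away from zero, e.g.
// rounding_divide_by_pow2(7, 2) == 2 (1.75 rounds to 2), rounding_divide_by_pow2(-7, 2) == -2 and
// rounding_divide_by_pow2(6, 2) == 2 (the 1.5 tie rounds away from zero). The vector versions reach the same
// result through the sign-dependent fixup followed by the rounding shift vrshlq_s32.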

inline float32x4x4_t convert_uint8x16_to_float32x4x4(const uint8x16_t &in)
{
    float32x4x4_t out;

    const auto tmp1 = vmovl_u8(vget_low_u8(in));
    out.val[0]      = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
    out.val[1]      = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));

    const auto tmp2 = vmovl_u8(vget_high_u8(in));
    out.val[2]      = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp2)));
    out.val[3]      = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp2)));
    return out;
}

inline float32x4x4_t convert_int8x16_to_float32x4x4(const int8x16_t &in)
{
    float32x4x4_t out;

    const auto tmp1 = vmovl_s8(vget_low_s8(in));
    out.val[0]      = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp1)));
    out.val[1]      = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp1)));

    const auto tmp2 = vmovl_s8(vget_high_s8(in));
    out.val[2]      = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp2)));
    out.val[3]      = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp2)));
    return out;
}

template <>
inline float32x4x4_t convert_to_float32x4x4(const uint8x16_t &in)
{
    return convert_uint8x16_to_float32x4x4(in);
}

template <>
inline float32x4x4_t convert_to_float32x4x4(const int8x16_t &in)
{
    return convert_int8x16_to_float32x4x4(in);
}

inline void convert_float32x4x3_to_uint8x8x3(const float32x4x3_t &in1, const float32x4x3_t &in2, uint8x8x3_t &out)
{
    out.val[0] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[0])),
                                         vqmovn_u32(vcvtq_u32_f32(in2.val[0]))));
    out.val[1] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[1])),
                                         vqmovn_u32(vcvtq_u32_f32(in2.val[1]))));
    out.val[2] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[2])),
                                         vqmovn_u32(vcvtq_u32_f32(in2.val[2]))));
}

inline void convert_float32x4x4_to_uint8x16(const float32x4x4_t &in, uint8x16_t &out)
{
    const auto low = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[0])),
                                  vqmovn_u32(vcvtq_u32_f32(in.val[1])));
    const auto high = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[2])),
                                   vqmovn_u32(vcvtq_u32_f32(in.val[3])));
    out = vcombine_u8(vqmovn_u16(low), vqmovn_u16(high));
}

inline void convert_float32x4x4_to_int8x16(const float32x4x4_t &in, int8x16_t &out)
{
    const auto low = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[0])),
                                  vqmovn_s32(vcvtq_s32_f32(in.val[1])));
    const auto high = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[2])),
                                   vqmovn_s32(vcvtq_s32_f32(in.val[3])));
    out = vcombine_s8(vqmovn_s16(low), vqmovn_s16(high));
}
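// In these packing helpers the float-to-integer conversion truncates towards zero and every narrowing step
// (vqmovn_u32 / vqmovn_u16 and vqmovn_s32 / vqmovn_s16) saturates, so out-of-range values are clamped to the
// destination range, e.g. 300.f becomes 255 as uint8_t and -200.f becomes -128 as int8_t.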

template <>
inline uint8x16_t convert_float_to_int<float32x4x4_t, uint8x16_t>(const float32x4x4_t &in)
{
    uint8x16_t out;
    convert_float32x4x4_to_uint8x16(in, out);
    return out;
}

template <>
inline float32x4x4_t convert_int_to_float<float32x4x4_t, uint8x16_t>(const uint8x16_t &in)
{
    return convert_uint8x16_to_float32x4x4(in);
}

template <>
inline int8x16_t convert_float_to_int<float32x4x4_t, int8x16_t>(const float32x4x4_t &in)
{
    int8x16_t out;
    convert_float32x4x4_to_int8x16(in, out);
    return out;
}

template <>
inline float32x4x4_t convert_int_to_float<float32x4x4_t, int8x16_t>(const int8x16_t &in)
{
    return convert_int8x16_to_float32x4x4(in);
}

inline float vreduce(const float32x4_t &v)
{
    const float32x2_t v0    = vget_high_f32(v);
    const float32x2_t v1    = vget_low_f32(v);
    const float32x2_t v_out = vadd_f32(v0, v1);

    const float a = vget_lane_f32(v_out, 0);
    const float b = vget_lane_f32(v_out, 1);

    return a + b;
}
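// vreduce sums the four lanes of a float32x4_t into a scalar, e.g. vreduce({1.f, 2.f, 3.f, 4.f}) == 10.f.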

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Exponent polynomial coefficients */
/** Logarithm polynomial coefficients */
#ifndef DOXYGEN_SKIP_THIS
inline float16x8_t vfloorq_f16(float16x8_t val)
{
    static const float16x8_t CONST_1 = vdupq_n_f16(1.f);

    const int16x8_t   z = vcvtq_s16_f16(val);
    const float16x8_t r = vcvtq_f16_s16(z);

    return vbslq_f16(vcgtq_f16(r, val), vsubq_f16(r, CONST_1), r);
}

inline float16x8_t vroundq_rte_f16(float16x8_t val)
{
    return vrndnq_f16(val);
}

inline float16x4_t vinvsqrt_f16(float16x4_t x)
{
    float16x4_t sqrt_reciprocal = vrsqrte_f16(x);
    sqrt_reciprocal             = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}

inline float16x8_t vinvsqrtq_f16(float16x8_t x)
{
    float16x8_t sqrt_reciprocal = vrsqrteq_f16(x);
    sqrt_reciprocal             = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal             = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}

inline float16x4_t vinv_f16(float16x4_t x)
{
    float16x4_t recip = vrecpe_f16(x);
    recip             = vmul_f16(vrecps_f16(x, recip), recip);
    recip             = vmul_f16(vrecps_f16(x, recip), recip);
    return recip;
}

inline float16x8_t vinvq_f16(float16x8_t x)
{
    float16x8_t recip = vrecpeq_f16(x);
    recip             = vmulq_f16(vrecpsq_f16(x, recip), recip);
    recip             = vmulq_f16(vrecpsq_f16(x, recip), recip);
    return recip;
}

inline float16x4_t vtanh_rational_approx_f16(float16x4_t x16)
{
    // Calculate the rational-approximation part of tanh on a half register of F16 values, using F32 arithmetic
    // Note: this does not handle overflow on its own; inputs need truncating at |x| = 4.508
    const float32x4_t x = vcvt_f32_f16(x16);

    const float32x4_t ONE = vdupq_n_f32(1.0f);
    const float32x4_t C1  = vdupq_n_f32(0.43760237f);
    const float32x4_t C2  = vdupq_n_f32(0.104402f);
    const float32x4_t C3  = vdupq_n_f32(0.013442706f);
    const float32x4_t C4  = vdupq_n_f32(0.00073561433f);

    const float32x4_t x2 = vmulq_f32(x, x);

    // Denominator polynomial 1 + C1*x^2 + C3*x^4
    float32x4_t denom = vfmaq_f32(C1, C3, x2);
    denom             = vfmaq_f32(ONE, x2, denom);

    // Numerator polynomial x*(1 + C2*x^2 + C4*x^4)
    float32x4_t numer = vfmaq_f32(C2, C4, x2);
    numer             = vfmaq_f32(ONE, x2, numer);
    numer             = vmulq_f32(numer, x);

    return vcvt_f16_f32(vdivq_f32(numer, denom));
}
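// In closed form the helper above evaluates
//   tanh(x) ~= x * (1 + C2*x^2 + C4*x^4) / (1 + C1*x^2 + C3*x^4)
// which vtanhq_f16 below uses for |x| < 4.508, switching to sign(x) beyond that limit.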

inline float16x8_t vtanhq_f16(float16x8_t x)
{
    // Split into high/low and use rational approximation on both parts exactly
    const float16x8_t tanh = vcombine_f16(vtanh_rational_approx_f16(vget_low_f16(x)),
                                          vtanh_rational_approx_f16(vget_high_f16(x)));

    // tanh(x) == sign(x) to F16 precision for |x| >= 4.508, use sign after this
    const float16x8_t ONE      = vdupq_n_f16(1.0f);
    const float16x8_t MAX_X    = vdupq_n_f16(4.508f);
    const auto        at_limit = vcageq_f16(x, MAX_X); // |x| >= 4.508
    const float16x8_t sign_x   = vbslq_f16(vclezq_f16(x), -ONE, ONE);
    return vbslq_f16(at_limit, sign_x, tanh);
}

inline float16x8_t vtaylor_polyq_f16(float16x8_t x, const std::array<float16x8_t, 8> &coeffs)
{
    const float16x8_t A   = vaddq_f16(coeffs[0], vmulq_f16(coeffs[4], x));
    const float16x8_t B   = vaddq_f16(coeffs[2], vmulq_f16(coeffs[6], x));
    const float16x8_t C   = vaddq_f16(coeffs[1], vmulq_f16(coeffs[5], x));
    const float16x8_t D   = vaddq_f16(coeffs[3], vmulq_f16(coeffs[7], x));
    const float16x8_t x2  = vmulq_f16(x, x);
    const float16x8_t x4  = vmulq_f16(x2, x2);
    const float16x8_t res = vaddq_f16(vaddq_f16(A, vmulq_f16(B, x2)), vmulq_f16(vaddq_f16(C, vmulq_f16(D, x2)), x4));
    return res;
}

inline float16x8_t vexpq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(vexpq_f32(x_low)), vcvt_f16_f32(vexpq_f32(x_high)));
    return res;
}

#ifdef __aarch64__
inline float16x8_t verfq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(verfq_f32(x_low)), vcvt_f16_f32(verfq_f32(x_high)));
    return res;
}
#endif // #ifdef __aarch64__

inline float16x8_t vlogq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(vlogq_f32(x_low)), vcvt_f16_f32(vlogq_f32(x_high)));
    return res;
}

inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
{
    float32x4_t n0_f32   = vcvt_f32_f16(vget_low_f16(n));
    float32x4_t n1_f32   = vcvt_f32_f16(vget_high_f16(n));
    float32x4_t val0_f32 = vcvt_f32_f16(vget_low_f16(val));
    float32x4_t val1_f32 = vcvt_f32_f16(vget_high_f16(val));

    float32x4_t res0_f32 = vexpq_f32(vmulq_f32(n0_f32, vlogq_f32(val0_f32)));
    float32x4_t res1_f32 = vexpq_f32(vmulq_f32(n1_f32, vlogq_f32(val1_f32)));

    return vcombine_f16(vcvt_f16_f32(res0_f32), vcvt_f16_f32(res1_f32));
}

inline float16x8_t vsinq_f16(float16x8_t val)
{
    const float32x4_t val_high = vcvt_f32_f16(vget_high_f16(val));
    const float32x4_t val_low  = vcvt_f32_f16(vget_low_f16(val));

    const float32x4_t res_high = vsinq_f32(val_high);
    const float32x4_t res_low  = vsinq_f32(val_low);

    return vcombine_f16(vcvt_f16_f32(res_low), vcvt_f16_f32(res_high));
}

inline float16x4_t vsin_f16(float16x4_t val)
{
    const float32x4_t val_f32  = vcvt_f32_f16(val);
    const float32x2_t val_high = vget_high_f32(val_f32);
    const float32x2_t val_low  = vget_low_f32(val_f32);

    const float32x2_t res_high = vsin_f32(val_high);
    const float32x2_t res_low  = vsin_f32(val_low);

    return vcvt_f16_f32(vcombine_f32(res_low, res_high));
}

inline float16_t vreduce(const float16x8_t &v)
{
    const float16x4_t v0    = vget_high_f16(v);
    const float16x4_t v1    = vget_low_f16(v);
    const float16x4_t v_out = vadd_f16(v0, v1);

    const float16_t a = vget_lane_f16(v_out, 0);
    const float16_t b = vget_lane_f16(v_out, 1);
    const float16_t c = vget_lane_f16(v_out, 2);
    const float16_t d = vget_lane_f16(v_out, 3);

    return a + b + c + d;
}
#endif /* DOXYGEN_SKIP_THIS */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute