/*
 * Copyright (c) 2017-2020, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/Rounding.h"

#include <arm_neon.h> // Needed for the NEON intrinsics used below; the qasymm8x16_t
                      // typedefs are assumed to be provided by an including header

namespace arm_compute
{
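/** Perform a multiply-accumulate on each of the 16 components of a QASYMM8 vector: vd * vs + vo
 *
 * This documentation block is a sketch inferred from the implementation below,
 * not taken verbatim from the library.
 *
 * @tparam round_policy Rounding used when converting back from float
 *                      (TO_NEAREST_EVEN / TO_NEAREST_UP on AArch64, otherwise truncation towards zero)
 *
 * @param[in] vd Input vector of 16 QASYMM8 (uint8) values
 * @param[in] vs Multiplier in F32 format; expected to be duplicated across all four lanes
 * @param[in] vo Addend in F32 format; expected to be duplicated across all four lanes
 *
 * @return A vector of 16 QASYMM8 values, saturated during narrowing
 */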
template <RoundingPolicy round_policy>
inline qasymm8x16_t vmlaq_qasymm8(qasymm8x16_t vd, float32x4_t vs, float32x4_t vo)
{
    // Convert uint8 vectors to uint16 vectors
    const uint8x8_t vd_low        = vget_low_u8(vd);
    const uint8x8_t vd_high       = vget_high_u8(vd);
    uint16x8_t      vd_low_u16x8  = vmovl_u8(vd_low);
    uint16x8_t      vd_high_u16x8 = vmovl_u8(vd_high);
    // Convert uint16 vectors to uint32 vectors
    uint32x4_t A_u32x4 = vmovl_u16(vget_low_u16(vd_low_u16x8));
    uint32x4_t B_u32x4 = vmovl_u16(vget_high_u16(vd_low_u16x8));
    uint32x4_t C_u32x4 = vmovl_u16(vget_low_u16(vd_high_u16x8));
    uint32x4_t D_u32x4 = vmovl_u16(vget_high_u16(vd_high_u16x8));
    // Convert uint32 vectors to float32 vectors
    float32x4_t A_f32x4 = vcvtq_f32_u32(A_u32x4);
    float32x4_t B_f32x4 = vcvtq_f32_u32(B_u32x4);
    float32x4_t C_f32x4 = vcvtq_f32_u32(C_u32x4);
    float32x4_t D_f32x4 = vcvtq_f32_u32(D_u32x4);
    // vd = vd*vs + vo
    A_f32x4 = vmlaq_f32(vo, A_f32x4, vs);
    B_f32x4 = vmlaq_f32(vo, B_f32x4, vs);
    C_f32x4 = vmlaq_f32(vo, C_f32x4, vs);
    D_f32x4 = vmlaq_f32(vo, D_f32x4, vs);
    // Convert float32 vectors back to uint32 vectors
#if __aarch64__
    if (round_policy == RoundingPolicy::TO_NEAREST_EVEN)
    {
        // Round to nearest with ties to even
        A_u32x4 = vcvtnq_u32_f32(A_f32x4);
        B_u32x4 = vcvtnq_u32_f32(B_f32x4);
        C_u32x4 = vcvtnq_u32_f32(C_f32x4);
        D_u32x4 = vcvtnq_u32_f32(D_f32x4);
    }
    else if (round_policy == RoundingPolicy::TO_NEAREST_UP)
    {
        // Round to nearest with ties away from zero
        A_u32x4 = vcvtaq_u32_f32(A_f32x4);
        B_u32x4 = vcvtaq_u32_f32(B_f32x4);
        C_u32x4 = vcvtaq_u32_f32(C_f32x4);
        D_u32x4 = vcvtaq_u32_f32(D_f32x4);
    }
    else
    {
        // RoundingPolicy::TO_ZERO: truncate towards zero
        A_u32x4 = vcvtq_u32_f32(A_f32x4);
        B_u32x4 = vcvtq_u32_f32(B_f32x4);
        C_u32x4 = vcvtq_u32_f32(C_f32x4);
        D_u32x4 = vcvtq_u32_f32(D_f32x4);
    }
#else  // #if __aarch64__
    // The rounding-mode conversions are only available on AArch64, so fall
    // back to truncation towards zero regardless of round_policy
    A_u32x4 = vcvtq_u32_f32(A_f32x4);
    B_u32x4 = vcvtq_u32_f32(B_f32x4);
    C_u32x4 = vcvtq_u32_f32(C_f32x4);
    D_u32x4 = vcvtq_u32_f32(D_f32x4);
#endif // #if __aarch64__
    // Convert uint32 vectors to uint16 vectors (with saturation)
    vd_low_u16x8  = vcombine_u16(vqmovn_u32(A_u32x4), vqmovn_u32(B_u32x4));
    vd_high_u16x8 = vcombine_u16(vqmovn_u32(C_u32x4), vqmovn_u32(D_u32x4));
    // Convert uint16 vectors to uint8 vectors (with saturation)
    return vcombine_u8(vqmovn_u16(vd_low_u16x8), vqmovn_u16(vd_high_u16x8));
}
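// Usage sketch (hypothetical values, not taken from the library): scale every
// lane of a QASYMM8 vector by 0.5f and add 2.f, rounding to nearest-even:
//
//   const qasymm8x16_t in  = vdupq_n_u8(10);
//   const float32x4_t  vs  = vdupq_n_f32(0.5f);
//   const float32x4_t  vo  = vdupq_n_f32(2.f);
//   const qasymm8x16_t out = vmlaq_qasymm8<RoundingPolicy::TO_NEAREST_EVEN>(in, vs, vo);
//   // Every lane of out holds 10 * 0.5 + 2 = 7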
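/** Perform a multiply-accumulate on each of the 16 components of a QASYMM8_SIGNED vector: vd * vs + vo
 *
 * As above, this documentation block is a sketch inferred from the
 * implementation below, not taken verbatim from the library.
 *
 * @tparam round_policy Rounding used when converting back from float
 *                      (TO_NEAREST_EVEN / TO_NEAREST_UP on AArch64, otherwise truncation towards zero)
 *
 * @param[in] vd Input vector of 16 QASYMM8_SIGNED (int8) values
 * @param[in] vs Multiplier in F32 format; expected to be duplicated across all four lanes
 * @param[in] vo Addend in F32 format; expected to be duplicated across all four lanes
 *
 * @return A vector of 16 QASYMM8_SIGNED values, saturated during narrowing
 */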
template <RoundingPolicy round_policy>
inline qasymm8x16_signed_t vmlaq_qasymm8_signed(qasymm8x16_signed_t vd, float32x4_t vs, float32x4_t vo)
{
    // Convert int8 vectors to int16 vectors
    const int8x8_t vd_low        = vget_low_s8(vd);
    const int8x8_t vd_high       = vget_high_s8(vd);
    int16x8_t      vd_low_s16x8  = vmovl_s8(vd_low);
    int16x8_t      vd_high_s16x8 = vmovl_s8(vd_high);
    // Convert int16 vectors to int32 vectors
    int32x4_t A_s32x4 = vmovl_s16(vget_low_s16(vd_low_s16x8));
    int32x4_t B_s32x4 = vmovl_s16(vget_high_s16(vd_low_s16x8));
    int32x4_t C_s32x4 = vmovl_s16(vget_low_s16(vd_high_s16x8));
    int32x4_t D_s32x4 = vmovl_s16(vget_high_s16(vd_high_s16x8));
    // Convert int32 vectors to float32 vectors
    float32x4_t A_f32x4 = vcvtq_f32_s32(A_s32x4);
    float32x4_t B_f32x4 = vcvtq_f32_s32(B_s32x4);
    float32x4_t C_f32x4 = vcvtq_f32_s32(C_s32x4);
    float32x4_t D_f32x4 = vcvtq_f32_s32(D_s32x4);
    // vd = vd*vs + vo
    A_f32x4 = vmlaq_f32(vo, A_f32x4, vs);
    B_f32x4 = vmlaq_f32(vo, B_f32x4, vs);
    C_f32x4 = vmlaq_f32(vo, C_f32x4, vs);
    D_f32x4 = vmlaq_f32(vo, D_f32x4, vs);
    // Convert float32 vectors back to int32 vectors
#if __aarch64__
    if (round_policy == RoundingPolicy::TO_NEAREST_EVEN)
    {
        // Round to nearest with ties to even
        A_s32x4 = vcvtnq_s32_f32(A_f32x4);
        B_s32x4 = vcvtnq_s32_f32(B_f32x4);
        C_s32x4 = vcvtnq_s32_f32(C_f32x4);
        D_s32x4 = vcvtnq_s32_f32(D_f32x4);
    }
    else if (round_policy == RoundingPolicy::TO_NEAREST_UP)
    {
        // Round to nearest with ties away from zero
        A_s32x4 = vcvtaq_s32_f32(A_f32x4);
        B_s32x4 = vcvtaq_s32_f32(B_f32x4);
        C_s32x4 = vcvtaq_s32_f32(C_f32x4);
        D_s32x4 = vcvtaq_s32_f32(D_f32x4);
    }
    else
    {
        // RoundingPolicy::TO_ZERO: truncate towards zero
        A_s32x4 = vcvtq_s32_f32(A_f32x4);
        B_s32x4 = vcvtq_s32_f32(B_f32x4);
        C_s32x4 = vcvtq_s32_f32(C_f32x4);
        D_s32x4 = vcvtq_s32_f32(D_f32x4);
    }
#else  // #if __aarch64__
    // The rounding-mode conversions are only available on AArch64, so fall
    // back to truncation towards zero regardless of round_policy
    A_s32x4 = vcvtq_s32_f32(A_f32x4);
    B_s32x4 = vcvtq_s32_f32(B_f32x4);
    C_s32x4 = vcvtq_s32_f32(C_f32x4);
    D_s32x4 = vcvtq_s32_f32(D_f32x4);
#endif // #if __aarch64__

    // Convert int32 vectors to int16 vectors (with saturation)
    vd_low_s16x8  = vcombine_s16(vqmovn_s32(A_s32x4), vqmovn_s32(B_s32x4));
    vd_high_s16x8 = vcombine_s16(vqmovn_s32(C_s32x4), vqmovn_s32(D_s32x4));
    // Convert int16 vectors to int8 vectors (with saturation)
    return vcombine_s8(vqmovn_s16(vd_low_s16x8), vqmovn_s16(vd_high_s16x8));
}
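// Usage sketch for the signed variant (hypothetical values, not taken from
// the library):
//
//   const qasymm8x16_signed_t in  = vdupq_n_s8(-10);
//   const float32x4_t         vs  = vdupq_n_f32(0.5f);
//   const float32x4_t         vo  = vdupq_n_f32(2.f);
//   const qasymm8x16_signed_t out = vmlaq_qasymm8_signed<RoundingPolicy::TO_NEAREST_UP>(in, vs, vo);
//   // Every lane of out holds -10 * 0.5 + 2 = -3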
} // namespace arm_compute