/*
 * Copyright (c) 2020-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/ActivationLayerInfo.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/wrapper/wrapper.h"

#include <cmath>
#include <cstddef>

#include "src/core/NEON/SVEAsymm.h"
#include "src/core/NEON/SVEMath.h"
#include <arm_sve.h>

namespace arm_compute
{
namespace cpu
{
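// SVE2 activation kernel for QASYMM8_SIGNED (int8) tensors: the relu-style
// functions and leaky relu are evaluated directly in the quantized domain,
// while logistic, tanh and hard swish dequantize to f32, apply the function
// and quantize back into the output space.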
void sve2_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());
    const ActivationLayerInfo::ActivationFunction act = act_info.activation();

    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

    const UniformQuantizationInfo qi_in  = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo qi_out = dst->info()->quantization_info().uniform();
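    // Clamp bounds a/b quantized into the input space for the relu-style
    // branches, plus f32 constants for the paths that dequantize first.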
    const auto va              = svdup_n_s8(quantize_qasymm8_signed(act_info.a(), qi_in));
    const auto vb              = svdup_n_s8(quantize_qasymm8_signed(act_info.b(), qi_in));
    const auto const_0         = quantize_qasymm8_signed(0.f, qi_in);
    const auto vconst_0        = svdup_n_s8(const_0);
    const auto vconst_1        = svdup_n_f32(1.f);
    const auto va_f32          = svdup_n_f32(act_info.a());
    const auto vb_f32          = svdup_n_f32(act_info.b());
    const auto const_6_f32     = svdup_n_f32(6.f);
    const auto const_0_f32     = svdup_n_f32(0.f);
    const auto const_3_f32     = svdup_n_f32(3.f);
    const auto const_inv_6_f32 = svdup_n_f32(0.166666667f);

    // Initialise scale/offset for re-quantization
    bool requant = true;
    if(qi_in.scale == qi_out.scale && qi_in.offset == qi_out.offset)
    {
        requant = false;
    }
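    // Re-quantization is a single affine map from the input space to the
    // output space: q_out = q_in * s + o, with s = scale_in / scale_out and
    // o = offset_out - offset_in * s. For example, scale_in = 0.5,
    // offset_in = 0, scale_out = 0.25, offset_out = 10 give s = 2 and o = 10,
    // so q_in = 3 (real 1.5) maps to q_out = 16, which is again real 1.5.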
    float s  = qi_in.scale / qi_out.scale;
    float o  = -qi_in.offset * s + qi_out.offset;
    auto  vs = svdup_n_f32(s);
    auto  vo = svdup_n_f32(o);

    // Initialise scale/offset for re-quantization with int32_t
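    // The constants are kept in Q24.8 fixed point (scaled by 1 << 8) so the
    // leaky-relu path can requantize with integer multiply-accumulates; the
    // arithmetic shift right by 8 in that path removes the factor again.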
    const auto voffset_in = svdup_n_s32(qi_in.offset);
    int32_t    s_s32      = round(s * (1 << 8), arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    int32_t    o_s32      = round(o * (1 << 8), arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    const auto vs_s32     = svdup_n_s32(s_s32);
    const auto vo_s32     = svdup_n_s32(o_s32);

    // Initialise scale/offset for re-quantization for leaky relu
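    // For negative inputs leaky relu evaluates alpha * x, so alpha
    // (act_info.a()) is folded directly into a second scale/offset pair.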
    int32_t s_leaky_s32 = round(s * act_info.a() * (1 << 8), arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    int32_t o_leaky_s32 = round((-qi_in.offset * s * act_info.a() + qi_out.offset) * (1 << 8),
                                arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    const auto vs_leaky_s32 = svdup_n_s32(s_leaky_s32);
    const auto vo_leaky_s32 = svdup_n_s32(o_leaky_s32);

    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
        const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

        svint8_t tmp;

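        // Predicated SVE loop: pg enables the lanes that are still in range
        // and the loop runs until svwhilelt yields an all-false predicate,
        // so no scalar tail loop is needed.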
        int      x  = window_start_x;
        svbool_t pg = svwhilelt_b8(x, window_end_x);
        do
        {
            const auto vin = svld1_s8(pg, input_ptr + x);
            if(act == ActivationLayerInfo::ActivationFunction::RELU)
            {
                // Perform activation: f(x) = max(0, x)
                tmp = svmax_s8_z(pg, vconst_0, vin);
                // Re-quantize to new output space
                tmp = requant ? svmla_qasymm8_signed_z(pg, tmp, vs, vo) : tmp;
            }
            else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
            {
                // Perform activation: f(x) = min(a, max(0, x))
                tmp = svmin_s8_z(pg, va, svmax_s8_z(pg, vconst_0, vin));
                // Re-quantize to new output space
                tmp = requant ? svmla_qasymm8_signed_z(pg, tmp, vs, vo) : tmp;
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
            {
                // Perform activation: f(x) = min(a, max(b, x))
                tmp = svmin_s8_z(pg, va, svmax_s8_z(pg, vb, vin));
                // Re-quantize to new output space
                tmp = requant ? svmla_qasymm8_signed_z(pg, tmp, vs, vo) : tmp;
            }
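            // The remaining functions are evaluated in f32: svdequantize_z
            // expands the int8 vector into an svfloat32x4_t (four f32 vectors
            // covering all lanes), each quarter is transformed, and
            // svquantize_signed_z packs the result back into int8.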
            else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                // De-quantize
                const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                // Perform activation: f(x) = 1 / (1 + exp(-x))
                const svfloat32x4_t tmp_dep = svcreate4_f32(
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 0))))),
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 1))))),
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 2))))),
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 3))))));
                // Re-quantize to new output space
                tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::TANH)
            {
                // De-quantize
                const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                // Perform activation: f(x) = a * tanh(b * x)
                const svfloat32x4_t tmp_dep = svcreate4_f32(
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 0), vb_f32))),
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 1), vb_f32))),
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 2), vb_f32))),
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 3), vb_f32))));
                // Re-quantize to new output space
                tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
            {
                // De-quantize
                const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                // Perform activation: f(x) = x * min(6, max(0, x + 3)) / 6
                const svfloat32x4_t tmp_dep = svcreate4_f32(
                    svmul_f32_z(pg, svget4_f32(vin_deq, 0), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 0), const_3_f32))))),
                    svmul_f32_z(pg, svget4_f32(vin_deq, 1), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 1), const_3_f32))))),
                    svmul_f32_z(pg, svget4_f32(vin_deq, 2), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 2), const_3_f32))))),
                    svmul_f32_z(pg, svget4_f32(vin_deq, 3), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 3), const_3_f32))))));
                // Re-quantize to new output space
                tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
            {
                svbool_t    p0, p1, p2, p3;
                svint32x4_t tmp_dep;

                // Expand the int8 input to four int32 vectors
                const svint32x4_t vin_s32 = svcreate4_s32(
                    svmovlb_s32(svmovlb_s16(vin)),
                    svmovlt_s32(svmovlb_s16(vin)),
                    svmovlb_s32(svmovlt_s16(vin)),
                    svmovlt_s32(svmovlt_s16(vin)));
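                // The four quarters hold the lanes de-interleaved (original
                // indices 0,4,..., then 2,6,..., 1,5,..., 3,7,...); the
                // saturating qxtnb/qxtnt narrowing below restores the order.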

                // Compare elements to the input offset (the quantized zero) to
                // identify negative inputs
                if(qi_in.scale >= 0)
                {
                    p0 = svcmplt_s32(pg, svget4_s32(vin_s32, 0), voffset_in);
                    p1 = svcmplt_s32(pg, svget4_s32(vin_s32, 1), voffset_in);
                    p2 = svcmplt_s32(pg, svget4_s32(vin_s32, 2), voffset_in);
                    p3 = svcmplt_s32(pg, svget4_s32(vin_s32, 3), voffset_in);
                }
                else
                {
                    p0 = svcmpgt_s32(pg, svget4_s32(vin_s32, 0), voffset_in);
                    p1 = svcmpgt_s32(pg, svget4_s32(vin_s32, 1), voffset_in);
                    p2 = svcmpgt_s32(pg, svget4_s32(vin_s32, 2), voffset_in);
                    p3 = svcmpgt_s32(pg, svget4_s32(vin_s32, 3), voffset_in);
                }

                // Multiply negative elements and requantize if necessary
                if(requant)
                {
                    tmp_dep = svcreate4_s32(
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p0, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 0), svsel(p0, vs_leaky_s32, vs_s32)), 8),
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p1, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 1), svsel(p1, vs_leaky_s32, vs_s32)), 8),
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p2, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 2), svsel(p2, vs_leaky_s32, vs_s32)), 8),
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p3, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 3), svsel(p3, vs_leaky_s32, vs_s32)), 8));
                }
                else
                {
                    tmp_dep = svcreate4_s32(
                        svasr_n_s32_m(p0, svmad_s32_m(p0, svget4_s32(vin_s32, 0), vs_leaky_s32, vo_leaky_s32), 8),
                        svasr_n_s32_m(p1, svmad_s32_m(p1, svget4_s32(vin_s32, 1), vs_leaky_s32, vo_leaky_s32), 8),
                        svasr_n_s32_m(p2, svmad_s32_m(p2, svget4_s32(vin_s32, 2), vs_leaky_s32, vo_leaky_s32), 8),
                        svasr_n_s32_m(p3, svmad_s32_m(p3, svget4_s32(vin_s32, 3), vs_leaky_s32, vo_leaky_s32), 8));
                }

                // Convert int32 vectors to int16 vectors (with saturation)
                const auto v_low_s16  = svqxtnt_s32(svqxtnb_s32(svget4_s32(tmp_dep, 0)), svget4_s32(tmp_dep, 1));
                const auto v_high_s16 = svqxtnt_s32(svqxtnb_s32(svget4_s32(tmp_dep, 2)), svget4_s32(tmp_dep, 3));

                // Convert int16 vectors to int8 vectors (with saturation)
                tmp = svqxtnt_s16(svqxtnb_s16(v_low_s16), v_high_s16);
            }
            else
            {
                ARM_COMPUTE_ERROR("Unsupported activation function");
            }

            svst1_s8(pg, output_ptr + x, tmp);

            x += svcntb();
            pg = svwhilelt_b8(x, window_end_x);
        }
        while(svptest_any(svptrue_b8(), pg));
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute