/*
 * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "src/cpu/kernels/softmax/generic/sve2/impl.h"
#include "arm_compute/core/Types.h"
#include "src/core/NEON/wrapper/wrapper.h"

namespace arm_compute
{
namespace cpu
{
template <typename ScalarType>
void sve2_softmax_logits_1d_quantized(const ITensor *in, const ITensor *max, void *const tmp,
                                      ITensor *out, float beta, bool is_log, const Window &window)
{
    const int start_x     = in->info()->valid_region().anchor.x();
    const int input_width = in->info()->valid_region().shape.x();

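    // Fold the dequantization scale and beta into a single negative factor: the loop below
    // computes (max - x), so multiplying by -beta * scale yields beta * scale * (x - max).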
    const float scale_beta     = -beta * in->info()->quantization_info().uniform().scale;
    const auto  scale_beta_vec = svdup_n_f32(scale_beta);

    Iterator   in_it(in, window);
    Iterator   max_it(max, window);
    Iterator   out_it(out, window);
    const auto all_true_pg = wrapper::svptrue<ScalarType>();
    using SVEType          = typename wrapper::traits::sve_vector<ScalarType>::type;

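    // svcntw() is the number of 32-bit lanes in an SVE vector. Each loop iteration widens one
    // vector of quantized bytes into four float vectors, so these are the float offsets of the
    // second, third and fourth sub-vectors within the scratch buffer.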
    const int inc_1 = static_cast<int>(svcntw());
    const int inc_2 = static_cast<int>(2 * svcntw());
    const int inc_3 = static_cast<int>(3 * svcntw());

    execute_window_loop(window, [&](const Coordinates &)
    {
        /* Get pointers */
        const auto in_ptr  = reinterpret_cast<const ScalarType *>(in_it.ptr()) + start_x;
        const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr()) + start_x;
        const auto tmp_ptr = reinterpret_cast<float *>(tmp);

        float sum{};

        /* Compute exponentials and sum */
        {
            /* Get max value */
            const auto max_val = *reinterpret_cast<const ScalarType *>(max_it.ptr());
            const auto vec_max = wrapper::svdup_n(max_val);
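            // Subtracting the row maximum keeps every exponent argument non-positive,
            // so exp() cannot overflow.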

            /* Init sum to zero */
            auto vec_sum_0 = svdup_n_f32(0.f);
            auto vec_sum_1 = svdup_n_f32(0.f);
            auto vec_sum_2 = svdup_n_f32(0.f);
            auto vec_sum_3 = svdup_n_f32(0.f);

            /* Loop over row and compute exponentials and sum */
            int      x  = 0;
            svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
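            // Unpack the byte-granularity predicate into four word-granularity predicates,
            // one for each float sub-vector produced when the quantized bytes are widened.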
            svbool_t pg_0 = svunpklo(svunpklo(pg));
            svbool_t pg_1 = svunpkhi(svunpklo(pg));
            svbool_t pg_2 = svunpklo(svunpkhi(pg));
            svbool_t pg_3 = svunpkhi(svunpkhi(pg));
            do
            {
                const auto vec_elements     = svld1(pg, in_ptr + x);
                const auto vec_elements_sub = svreinterpret_u8(svsub_z(pg, vec_max, vec_elements));

                auto vec_elements_flt_0 = svcvt_f32_z(pg_0, svunpklo(svunpklo(vec_elements_sub)));
                auto vec_elements_flt_1 = svcvt_f32_z(pg_1, svunpkhi(svunpklo(vec_elements_sub)));
                auto vec_elements_flt_2 = svcvt_f32_z(pg_2, svunpklo(svunpkhi(vec_elements_sub)));
                auto vec_elements_flt_3 = svcvt_f32_z(pg_3, svunpkhi(svunpkhi(vec_elements_sub)));

                if(is_log)
                {
                    vec_elements_flt_0 = svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec);
                    vec_elements_flt_1 = svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec);
                    vec_elements_flt_2 = svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec);
                    vec_elements_flt_3 = svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec);
                    vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, svexp_f32_z(pg_0, vec_elements_flt_0));
                    vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, svexp_f32_z(pg_1, vec_elements_flt_1));
                    vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, svexp_f32_z(pg_2, vec_elements_flt_2));
                    vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, svexp_f32_z(pg_3, vec_elements_flt_3));
                }
                else
                {
                    vec_elements_flt_0 = svexp_f32_z(pg_0, svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec));
                    vec_elements_flt_1 = svexp_f32_z(pg_1, svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec));
                    vec_elements_flt_2 = svexp_f32_z(pg_2, svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec));
                    vec_elements_flt_3 = svexp_f32_z(pg_3, svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec));
                    vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, vec_elements_flt_0);
                    vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, vec_elements_flt_1);
                    vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, vec_elements_flt_2);
                    vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, vec_elements_flt_3);
                }

                svst1_f32(pg_0, tmp_ptr + x, vec_elements_flt_0);
                svst1_f32(pg_1, tmp_ptr + x + inc_1, vec_elements_flt_1);
                svst1_f32(pg_2, tmp_ptr + x + inc_2, vec_elements_flt_2);
                svst1_f32(pg_3, tmp_ptr + x + inc_3, vec_elements_flt_3);

                x += wrapper::svcnt<ScalarType>();
                pg   = wrapper::svwhilelt<ScalarType>(x, input_width);
                pg_0 = svunpklo(svunpklo(pg));
                pg_1 = svunpkhi(svunpklo(pg));
                pg_2 = svunpklo(svunpkhi(pg));
                pg_3 = svunpkhi(svunpkhi(pg));
            }
            while(svptest_any(all_true_pg, pg));

            /* Reduce sum */
            const auto vec_sum = svadd_f32_z(all_true_pg, svadd_f32_z(all_true_pg, vec_sum_0, vec_sum_1), svadd_f32_z(all_true_pg, vec_sum_2, vec_sum_3));
            sum                = svaddv_f32(all_true_pg, vec_sum);

            /* Finalize the normalization factor. The predicated loop above already covered the
             * whole row, so there is no scalar tail to run. For log-softmax the sum is turned
             * into log(sum); otherwise the result is mapped onto the full [0, 256) quantized
             * range, so the 256 factor is folded into the reciprocal of the sum. */
            if(is_log)
            {
                sum = std::log(sum);
            }
            else
            {
                sum = 256.f / sum;
            }
        }

        /* Normalize exponentials */
        {
            constexpr bool is_qasymm8_signed = std::is_same<ScalarType, qasymm8_signed_t>::value;
            /* Loop over row and compute softmax */
            int      x  = 0;
            svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
            svbool_t pg_0 = svunpklo(svunpklo(pg));
            svbool_t pg_1 = svunpkhi(svunpklo(pg));
            svbool_t pg_2 = svunpklo(svunpkhi(pg));
            svbool_t pg_3 = svunpkhi(svunpkhi(pg));
            do
            {
                auto vec_in_0 = svld1_f32(pg_0, tmp_ptr + x);
                auto vec_in_1 = svld1_f32(pg_1, tmp_ptr + x + inc_1);
                auto vec_in_2 = svld1_f32(pg_2, tmp_ptr + x + inc_2);
                auto vec_in_3 = svld1_f32(pg_3, tmp_ptr + x + inc_3);

                svfloat32_t res_0{};
                svfloat32_t res_1{};
                svfloat32_t res_2{};
                svfloat32_t res_3{};

                if(is_log)
                {
                    res_0 = svsub_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
                    res_1 = svsub_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
                    res_2 = svsub_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
                    res_3 = svsub_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));
                }
                else
                {
                    res_0 = svmul_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
                    res_1 = svmul_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
                    res_2 = svmul_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
                    res_3 = svmul_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));

                    if(is_qasymm8_signed)
                    {
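                        // Signed output: the scaled result lies in [0, 256), so subtract 128
                        // to recentre it onto the QASYMM8_SIGNED range before conversion.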
                        const auto offset_vec = svdup_n_f32(128.f);
                        res_0 = svsub_z(pg_0, res_0, offset_vec);
                        res_1 = svsub_z(pg_1, res_1, offset_vec);
                        res_2 = svsub_z(pg_2, res_2, offset_vec);
                        res_3 = svsub_z(pg_3, res_3, offset_vec);
                    }
                }

                // Convert the four float sub-vectors back to one full vector of the quantized type and store it
                const auto out = convert_float_to_int<SVEType>(res_0, res_1, res_2, res_3);
                svst1(pg, out_ptr + x, out);
                x += wrapper::svcnt<ScalarType>();
                pg   = wrapper::svwhilelt<ScalarType>(x, input_width);
                pg_0 = svunpklo(svunpklo(pg));
                pg_1 = svunpkhi(svunpklo(pg));
                pg_2 = svunpklo(svunpkhi(pg));
                pg_3 = svunpkhi(svunpkhi(pg));
            }
            while(svptest_any(all_true_pg, pg));
        }
    },
    in_it, max_it, out_it);
}

template void sve2_softmax_logits_1d_quantized<qasymm8_signed_t>(const ITensor *in, const ITensor *max, void *const tmp,
                                                                 ITensor *out, float beta, bool is_log, const Window &window);
template void sve2_softmax_logits_1d_quantized<qasymm8_t>(const ITensor *in, const ITensor *max, void *const tmp,
                                                          ITensor *out, float beta, bool is_log, const Window &window);
} // namespace cpu
} // namespace arm_compute