/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/wrapper/wrapper.h"

#include <cmath>
#include <cstddef>

#if defined(__ARM_FEATURE_SVE2)
#include "src/core/NEON/SVEAsymm.h"
#include "src/core/NEON/SVEMath.h"
#include <arm_sve.h>

namespace arm_compute
{
namespace cpu
{
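// Apply the activation described by act_info to a QASYMM8_SIGNED tensor using SVE2.
// RELU variants and leaky relu run entirely in the quantized domain; logistic,
// tanh and hard-swish de-quantize to f32, apply the function and re-quantize.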
void qasymm8_signed_sve_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    const auto                                    window_start_x = static_cast<int>(window.x().start());
    const auto                                    window_end_x   = static_cast<int>(window.x().end());
    const ActivationLayerInfo::ActivationFunction act            = act_info.activation();

    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

    const UniformQuantizationInfo qi_in           = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo qi_out          = dst->info()->quantization_info().uniform();
    const auto                    va              = svdup_n_s8(quantize_qasymm8_signed(act_info.a(), qi_in));
    const auto                    vb              = svdup_n_s8(quantize_qasymm8_signed(act_info.b(), qi_in));
    const auto                    const_0         = quantize_qasymm8_signed(0.f, qi_in);
    const auto                    vconst_0        = svdup_n_s8(const_0);
    const auto                    vconst_1        = svdup_n_f32(1.f);
    const auto                    va_f32          = svdup_n_f32(act_info.a());
    const auto                    vb_f32          = svdup_n_f32(act_info.b());
    const auto                    const_6_f32     = svdup_n_f32(6.f);
    const auto                    const_0_f32     = svdup_n_f32(0.f);
    const auto                    const_3_f32     = svdup_n_f32(3.f);
    const auto                    const_inv_6_f32 = svdup_n_f32(0.166666667f);

    // Initialise scale/offset for re-quantization
    bool requant = true;
    if(qi_in.scale == qi_out.scale && qi_in.offset == qi_out.offset)
    {
        requant = false;
    }
    float s  = qi_in.scale / qi_out.scale;
    float o  = -qi_in.offset * s + qi_out.offset;
    auto  vs = svdup_n_f32(s);
    auto  vo = svdup_n_f32(o);

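    // s and o re-map a quantized input value q_in to
    //     q_out = q_in * s + o, with s = scale_in / scale_out and
    //     o = offset_out - offset_in * s,
    // so that both quantization spaces describe the same real value.
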
    // Initialise scale/offset for re-quantization with int32_t
    const auto voffset_in = svdup_n_s32(qi_in.offset);
    int32_t    s_s32      = round(s * (1 << 8), arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    int32_t    o_s32      = round(o * (1 << 8), arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    const auto vs_s32     = svdup_n_s32(s_s32);
    const auto vo_s32     = svdup_n_s32(o_s32);

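    // s_s32 and o_s32 hold s and o in Q24.8 fixed point (scaled by 1 << 8), so
    // the leaky relu path below can re-quantize with an integer
    // multiply-accumulate followed by an arithmetic shift right by 8.
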
    // Initialise scale/offset for re-quantization for leaky relu
    int32_t s_leaky_s32 = round(s * act_info.a() * (1 << 8), arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    int32_t o_leaky_s32 = round((-qi_in.offset * s * act_info.a() + qi_out.offset) * (1 << 8),
                                arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
    const auto vs_leaky_s32 = svdup_n_s32(s_leaky_s32);
    const auto vo_leaky_s32 = svdup_n_s32(o_leaky_s32);

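    // Leaky relu scales negative real values by act_info.a(); folding that
    // factor into the re-quantization scale and offset turns the whole
    // negative-side computation into the same fixed-point multiply-add.
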
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
        const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

        svint8_t tmp;

        int      x  = window_start_x;
        svbool_t pg = svwhilelt_b8(x, window_end_x);
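        // svwhilelt/svptest form a predicated loop: the trailing iteration
        // runs under a partial predicate, so no scalar tail loop is needed.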
        do
        {
            const auto vin = svld1_s8(pg, input_ptr + x);
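            // RELU variants stay in the quantized domain; when input and
            // output quantization differ, svmla_qasymm8_signed_z applies the
            // affine remap to the still-quantized result.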
            if(act == ActivationLayerInfo::ActivationFunction::RELU)
            {
                // Perform activation
                tmp = svmax_s8_z(pg, vconst_0, vin);
                // Re-quantize to new output space
                tmp = requant ? svmla_qasymm8_signed_z(pg, tmp, vs, vo) : tmp;
            }
            else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
            {
                // Perform activation
                tmp = svmin_s8_z(pg, va, svmax_s8_z(pg, vconst_0, vin));
                // Re-quantize to new output space
                tmp = requant ? svmla_qasymm8_signed_z(pg, tmp, vs, vo) : tmp;
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
            {
                // Perform activation
                tmp = svmin_s8_z(pg, va, svmax_s8_z(pg, vb, vin));
                // Re-quantize to new output space
                tmp = requant ? svmla_qasymm8_signed_z(pg, tmp, vs, vo) : tmp;
            }
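            // Logistic, tanh and hard-swish have no cheap quantized-domain
            // form: de-quantize to four f32 vectors, apply the function in
            // f32, then re-quantize into the output space.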
            else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                // De-quantize
                const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                // Perform activation
                const svfloat32x4_t tmp_dep = svcreate4_f32(
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 0))))),
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 1))))),
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 2))))),
                    svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 3))))));
                // Re-quantize to new output space
                tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::TANH)
            {
                // De-quantize
                const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                // Perform activation
                const svfloat32x4_t tmp_dep = svcreate4_f32(
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 0), vb_f32))),
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 1), vb_f32))),
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 2), vb_f32))),
                    svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 3), vb_f32))));
                // Re-quantize to new output space
                tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
            {
                // De-quantize
                const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                // Perform activation
                const svfloat32x4_t tmp_dep = svcreate4_f32(
                    svmul_f32_z(pg, svget4_f32(vin_deq, 0), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 0), const_3_f32))))),
                    svmul_f32_z(pg, svget4_f32(vin_deq, 1), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 1), const_3_f32))))),
                    svmul_f32_z(pg, svget4_f32(vin_deq, 2), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 2), const_3_f32))))),
                    svmul_f32_z(pg, svget4_f32(vin_deq, 3), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 3), const_3_f32))))));
                // Re-quantize to new output space
                tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
            {
                svbool_t    p0, p1, p2, p3;
                svint32x4_t tmp_dep;

                // Expand to int32
                const svint32x4_t vin_s32 = svcreate4_s32(
                    svmovlb_s32(svmovlb_s16(vin)),
                    svmovlt_s32(svmovlb_s16(vin)),
                    svmovlb_s32(svmovlt_s16(vin)),
                    svmovlt_s32(svmovlt_s16(vin)));
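                // svmovlb/svmovlt widen the even/odd lanes respectively, so
                // the four s32 vectors hold the input in interleaved rather
                // than sequential order; the matching svqxtnb/svqxtnt
                // narrowing at the end restores the original lane positions.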

                // Compare elements to input offset
                if(qi_in.scale >= 0)
                {
                    p0 = svcmplt_s32(pg, svget4_s32(vin_s32, 0), voffset_in);
                    p1 = svcmplt_s32(pg, svget4_s32(vin_s32, 1), voffset_in);
                    p2 = svcmplt_s32(pg, svget4_s32(vin_s32, 2), voffset_in);
                    p3 = svcmplt_s32(pg, svget4_s32(vin_s32, 3), voffset_in);
                }
                else
                {
                    p0 = svcmpgt_s32(pg, svget4_s32(vin_s32, 0), voffset_in);
                    p1 = svcmpgt_s32(pg, svget4_s32(vin_s32, 1), voffset_in);
                    p2 = svcmpgt_s32(pg, svget4_s32(vin_s32, 2), voffset_in);
                    p3 = svcmpgt_s32(pg, svget4_s32(vin_s32, 3), voffset_in);
                }

                // Multiply negative elements and requantize if necessary
                if(requant)
                {
                    tmp_dep = svcreate4_s32(
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p0, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 0), svsel(p0, vs_leaky_s32, vs_s32)), 8),
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p1, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 1), svsel(p1, vs_leaky_s32, vs_s32)), 8),
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p2, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 2), svsel(p2, vs_leaky_s32, vs_s32)), 8),
                        svasr_n_s32_m(pg, svmla_s32_m(pg, svsel(p3, vo_leaky_s32, vo_s32), svget4_s32(vin_s32, 3), svsel(p3, vs_leaky_s32, vs_s32)), 8));
                }
                else
                {
                    tmp_dep = svcreate4_s32(
                        svasr_n_s32_m(p0, svmad_s32_m(p0, svget4_s32(vin_s32, 0), vs_leaky_s32, vo_leaky_s32), 8),
                        svasr_n_s32_m(p1, svmad_s32_m(p1, svget4_s32(vin_s32, 1), vs_leaky_s32, vo_leaky_s32), 8),
                        svasr_n_s32_m(p2, svmad_s32_m(p2, svget4_s32(vin_s32, 2), vs_leaky_s32, vo_leaky_s32), 8),
                        svasr_n_s32_m(p3, svmad_s32_m(p3, svget4_s32(vin_s32, 3), vs_leaky_s32, vo_leaky_s32), 8));
                }

                // Convert int32 vectors to int16 vectors (with saturation)
                const auto v_low_s16  = svqxtnt_s32(svqxtnb_s32(svget4_s32(tmp_dep, 0)), svget4_s32(tmp_dep, 1));
                const auto v_high_s16 = svqxtnt_s32(svqxtnb_s32(svget4_s32(tmp_dep, 2)), svget4_s32(tmp_dep, 3));

                // Convert int16 vectors to int8 vectors (with saturation)
                tmp = svqxtnt_s16(svqxtnb_s16(v_low_s16), v_high_s16);
            }
            else
            {
                ARM_COMPUTE_ERROR("Unsupported activation function");
            }

            svst1_s8(pg, output_ptr + x, tmp);

            x += svcntb();
            pg = svwhilelt_b8(x, window_end_x);
        }
        while(svptest_any(svptrue_b8(), pg));
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute
#endif /* defined(__ARM_FEATURE_SVE2) */