blob: 6f2d5d8533a2d113c06d57bb7d91803d943d974a [file] [log] [blame]
/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "src/core/NEON/NEMath.h"
25
26#include "arm_compute/core/Helpers.h"
27#include "arm_compute/core/Validate.h"
28#include "src/core/NEON/wrapper/wrapper.h"
Michalis Spyrouc4d45552020-10-19 12:41:30 +010029
30#include <arm_neon.h>
31#include <cmath>
32#include <cstddef>
33
34#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
35
36namespace arm_compute
37{
38namespace cpu
39{
40namespace
41{
42#ifndef __aarch64__
43inline float16x8_t mask_float_vector(const float16x8_t &in, const uint16x8_t &mask)
44{
45 auto int_in = vreinterpretq_u16_f16(in);
46 return vreinterpretq_f16_u16(wrapper::vand(int_in, mask));
47}
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +000048#endif /* __aarch64__ */
Michalis Spyrouc4d45552020-10-19 12:41:30 +010049} // namespace
50
51void fp16_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
52{
Michele Di Giorgio33f41fa2021-03-09 14:09:08 +000053 /** SIMD vector tag type. */
Michalis Spyrouc4d45552020-10-19 12:41:30 +010054 using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
55 const ActivationLayerInfo::ActivationFunction act = act_info.activation();
56
57 constexpr int window_step_x = 8;
58 const auto window_start_x = static_cast<int>(window.x().start());
59 const auto window_end_x = static_cast<int>(window.x().end());
60
61 Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
62 win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
63
64 Iterator input(src, win_collapsed);
65 Iterator output(dst, win_collapsed);
66
67 // In case of non-aarch64, a small delta value is added to the input
68 // to prevent NAN values caused by zeros in inputs to SQRT.
69 // In case of aarh64, we call vsqrt directly, so we don't use delta.
70#ifndef __aarch64__
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +000071 const auto delta = wrapper::vdup_n(static_cast<float16_t>((1e-7), ExactTagType {}));
72#endif /* __aarch64__ */
Michalis Spyrouc4d45552020-10-19 12:41:30 +010073
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +000074 const auto const_1 = wrapper::vdup_n(static_cast<float16_t>(1.f), ExactTagType{});
75 const auto const_0 = wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{});
76 const auto const_6 = wrapper::vdup_n(static_cast<float16_t>(6.f), ExactTagType{});
77 const auto const_3 = wrapper::vdup_n(static_cast<float16_t>(3.f), ExactTagType{});
78 const auto const_inv_6 = wrapper::vdup_n(static_cast<float16_t>(0.166666667f), ExactTagType{});
Michalis Spyrouc4d45552020-10-19 12:41:30 +010079
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +000080 constexpr float soft_relu_thresh = 12.f;
81 const auto vsoft_relu_thresh = wrapper::vdup_n(static_cast<float16_t>(soft_relu_thresh), ExactTagType{});
82
83 const auto va = wrapper::vdup_n(static_cast<float16_t>(act_info.a()), ExactTagType{});
84 const auto vb = wrapper::vdup_n(static_cast<float16_t>(act_info.b()), ExactTagType{});
85 const auto a = static_cast<float16_t>(act_info.a());
86 const auto b = static_cast<float16_t>(act_info.b());
87 execute_window_loop(win_collapsed, [&](const Coordinates &)
Michalis Spyrouc4d45552020-10-19 12:41:30 +010088 {
89 const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
90 const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
91
92 wrapper::traits::neon_bitvector_t<float16_t, wrapper::traits::BitWidth::W128> tmp;
93
94 // Compute S elements per iteration
95 int x = window_start_x;
96 for(; x <= (window_end_x - window_step_x); x += window_step_x)
97 {
98 const auto vin = wrapper::vloadq(input_ptr + x);
99 switch(act)
100 {
101 case ActivationLayerInfo::ActivationFunction::ABS:
102 tmp = wrapper::vabs(vin);
103 break;
104 case ActivationLayerInfo::ActivationFunction::LINEAR:
105 tmp = wrapper::vmla(vb, va, vin);
106 break;
107 case ActivationLayerInfo::ActivationFunction::LOGISTIC:
108 tmp = wrapper::vinv(wrapper::vadd(const_1, wrapper::vexpq(wrapper::vneg(vin))));
109 break;
110 case ActivationLayerInfo::ActivationFunction::RELU:
111 tmp = wrapper::vmax(const_0, vin);
112 break;
113 case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
114 tmp = wrapper::vmin(va, wrapper::vmax(const_0, vin));
115 break;
116 case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
117 tmp = wrapper::vmin(va, wrapper::vmax(vb, vin));
118 break;
119 case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
120 tmp = wrapper::vbsl(wrapper::vcgt(vin, const_0), vin, wrapper::vmul(va, vin));
121 break;
122 case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +0000123 tmp = wrapper::vbsl(wrapper::vcgt(vin, vsoft_relu_thresh), vin, wrapper::vlog(wrapper::vadd(const_1, wrapper::vexpq(vin))));
Michalis Spyrouc4d45552020-10-19 12:41:30 +0100124 break;
125 case ActivationLayerInfo::ActivationFunction::ELU:
126 tmp = wrapper::vbsl(wrapper::vcge(vin, const_0), vin, wrapper::vmul(va, wrapper::vsub(wrapper::vexpq(vin), const_1)));
127 break;
128 case ActivationLayerInfo::ActivationFunction::SQRT:
129#ifdef __aarch64__
130 tmp = wrapper::vsqrt(vin);
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +0000131#else /* __aarch64__ */
Michalis Spyrouc4d45552020-10-19 12:41:30 +0100132 {
133 const auto bitmask = wrapper::vceq(vin, wrapper::vdup_n(0, ExactTagType{}));
134 tmp = wrapper::vinv(wrapper::vinvsqrt(wrapper::vadd(vin, mask_float_vector(delta, bitmask))));
135 tmp = mask_float_vector(tmp, wrapper::vnot(bitmask));
136 }
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +0000137#endif /* __aarch64__ */
Michalis Spyrouc4d45552020-10-19 12:41:30 +0100138 break;
139 case ActivationLayerInfo::ActivationFunction::SQUARE:
140 tmp = wrapper::vmul(vin, vin);
141 break;
142 case ActivationLayerInfo::ActivationFunction::TANH:
143 tmp = wrapper::vmul(va, wrapper::vtanh(wrapper::vmul(vb, vin)));
144 break;
145 case ActivationLayerInfo::ActivationFunction::IDENTITY:
146 tmp = vin;
147 break;
148 case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
149 tmp = wrapper::vmul(vin, wrapper::vmul(const_inv_6, wrapper::vmin(const_6, wrapper::vmax(const_0, wrapper::vadd(vin, const_3)))));
150 break;
151 default:
152 ARM_COMPUTE_ERROR("Unsupported activation function");
153 }
154 wrapper::vstore(output_ptr + x, tmp);
155 }
156
157 // Compute left-over elements
158 for(; x < window_end_x; ++x)
159 {
160 const float16_t in = *(reinterpret_cast<const float16_t *>(input_ptr + x));
161 float16_t tmp;
162 switch(act)
163 {
164 case ActivationLayerInfo::ActivationFunction::ABS:
165 tmp = std::abs(in);
166 break;
167 case ActivationLayerInfo::ActivationFunction::LINEAR:
168 tmp = a * in + b;
169 break;
170 case ActivationLayerInfo::ActivationFunction::LOGISTIC:
171 tmp = static_cast<float16_t>(1) / (static_cast<float16_t>(1) + std::exp(-in));
172 break;
173 case ActivationLayerInfo::ActivationFunction::RELU:
174 tmp = std::max<float16_t>(static_cast<float16_t>(0), in);
175 break;
176 case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
177 tmp = std::min<float16_t>(a, std::max(static_cast<float16_t>(0), in));
178 break;
179 case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
180 tmp = std::min<float16_t>(a, std::max<float16_t>(b, in));
181 break;
182 case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
183 tmp = (in > 0) ? in : a * in;
184 break;
185 case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
Michele Di Giorgio6c25aad2021-01-14 16:17:48 +0000186 tmp = (in > soft_relu_thresh) ? in : std::log(static_cast<float16_t>(1) + std::exp(in));
Michalis Spyrouc4d45552020-10-19 12:41:30 +0100187 break;
188 case ActivationLayerInfo::ActivationFunction::ELU:
189 tmp = (in >= 0) ? in : a * (std::exp(in) - 1);
190 break;
191 case ActivationLayerInfo::ActivationFunction::SQRT:
192 tmp = std::sqrt(in);
193 break;
194 case ActivationLayerInfo::ActivationFunction::SQUARE:
195 tmp = in * in;
196 break;
197 case ActivationLayerInfo::ActivationFunction::TANH:
198 tmp = a * std::tanh(b * in);
199 break;
200 case ActivationLayerInfo::ActivationFunction::IDENTITY:
201 tmp = in;
202 break;
203 case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
204 tmp = in * ((std::min(std::max((in + 3), 0.0f), 6.0f)) * 0.166666667f);
205 break;
206 default:
207 ARM_COMPUTE_ERROR("Unsupported activation function");
208 }
209 *(output_ptr + x) = tmp;
210 }
211 },
212 input, output);
213}
214} // namespace cpu
215} // namespace arm_compute
216
Sheri Zhangac6499a2021-02-10 15:32:38 +0000217#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */