/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/NEMath.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/common/Validate.h"

#include <arm_neon.h>
#include <cmath>
#include <cstddef>

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)

namespace arm_compute
{
namespace cpu
{
namespace
{
#ifndef __aarch64__
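// Zeroes out lanes of `in` wherever the corresponding lane of `mask` is 0: the
// fp16 lanes are reinterpreted as u16 bit patterns and AND-ed with the mask,
// so no floating-point arithmetic (and no NaN propagation) takes place.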
inline float16x8_t mask_float_vector(const float16x8_t &in, const uint16x8_t &mask)
{
    auto int_in = vreinterpretq_u16_f16(in);
    return vreinterpretq_f16_u16(wrapper::vand(int_in, mask));
}
#endif /* __aarch64__ */
} // namespace

void fp16_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    /** Neon vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
    const ActivationLayerInfo::ActivationFunction act = act_info.activation();

    constexpr int window_step_x  = 8;
    const auto    window_start_x = static_cast<int>(window.x().start());
    const auto    window_end_x   = static_cast<int>(window.x().end());

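    // Collapse the higher dimensions where possible and force the x dimension to
    // a single step, so both the vector loop and the scalar tail below can walk
    // x manually from window_start_x to window_end_x.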
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

    // In case of non-aarch64, a small delta value is added to the input
    // to prevent NaN values caused by zeros in inputs to SQRT.
    // In case of aarch64, we call vsqrt directly, so we don't use delta.
#ifndef __aarch64__
    const auto delta = wrapper::vdup_n(static_cast<float16_t>(1e-7), ExactTagType{});
#endif /* __aarch64__ */

    const auto const_1     = wrapper::vdup_n(static_cast<float16_t>(1.f), ExactTagType{});
    const auto const_0     = wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{});
    const auto const_6     = wrapper::vdup_n(static_cast<float16_t>(6.f), ExactTagType{});
    const auto const_3     = wrapper::vdup_n(static_cast<float16_t>(3.f), ExactTagType{});
    const auto const_inv_6 = wrapper::vdup_n(static_cast<float16_t>(0.166666667f), ExactTagType{});

    constexpr float soft_relu_thresh  = 12.f;
    const auto      vsoft_relu_thresh = wrapper::vdup_n(static_cast<float16_t>(soft_relu_thresh), ExactTagType{});

    const auto va = wrapper::vdup_n(static_cast<float16_t>(act_info.a()), ExactTagType{});
    const auto vb = wrapper::vdup_n(static_cast<float16_t>(act_info.b()), ExactTagType{});
    const auto a  = static_cast<float16_t>(act_info.a());
    const auto b  = static_cast<float16_t>(act_info.b());
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());

        wrapper::traits::neon_bitvector_t<float16_t, wrapper::traits::BitWidth::W128> tmp;

        // Compute window_step_x elements per iteration
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin = wrapper::vloadq(input_ptr + x);
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = wrapper::vabs(vin);
                    break;
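                // LINEAR: a * x + b; wrapper::vmla(vb, va, vin) computes
                // vb + va * vin as a single multiply-accumulate.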
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = wrapper::vmla(vb, va, vin);
                    break;
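                // LOGISTIC: 1 / (1 + exp(-x)), with the division expressed as a
                // vector reciprocal (wrapper::vinv).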
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                    tmp = wrapper::vinv(wrapper::vadd(const_1, wrapper::vexpq(wrapper::vneg(vin))));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = wrapper::vmax(const_0, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(const_0, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(vb, vin));
                    break;
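                // LEAKY_RELU: x for x > 0, otherwise a * x. vbsl(mask, v1, v2)
                // selects lanes from v1 where the mask is set and from v2 elsewhere.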
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                    tmp = wrapper::vbsl(wrapper::vcgt(vin, const_0), vin, wrapper::vmul(va, vin));
                    break;
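                // SOFT_RELU: log(1 + exp(x)). Above soft_relu_thresh (12) the result
                // equals x to fp16 precision, and skipping exp() there also avoids
                // overflow (exp(12) already exceeds the fp16 maximum of 65504).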
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    tmp = wrapper::vbsl(wrapper::vcgt(vin, vsoft_relu_thresh), vin, wrapper::vlog(wrapper::vadd(const_1, wrapper::vexpq(vin))));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
                    tmp = wrapper::vbsl(wrapper::vcge(vin, const_0), vin, wrapper::vmul(va, wrapper::vsub(wrapper::vexpq(vin), const_1)));
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
#ifdef __aarch64__
                    tmp = wrapper::vsqrt(vin);
#else /* __aarch64__ */
                {
                    const auto bitmask = wrapper::vceq(vin, wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{}));
                    tmp = wrapper::vinv(wrapper::vinvsqrt(wrapper::vadd(vin, mask_float_vector(delta, bitmask))));
                    tmp = mask_float_vector(tmp, wrapper::vnot(bitmask));
                }
#endif /* __aarch64__ */
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = wrapper::vmul(vin, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
                    tmp = wrapper::vmul(va, wrapper::vtanh(wrapper::vmul(vb, vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = vin;
                    break;
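                // HARD_SWISH: x * relu6(x + 3) / 6, with 1/6 pre-baked as const_inv_6.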
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
                    tmp = wrapper::vmul(vin, wrapper::vmul(const_inv_6, wrapper::vmin(const_6, wrapper::vmax(const_0, wrapper::vadd(vin, const_3)))));
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            wrapper::vstore(output_ptr + x, tmp);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            const float16_t in = *(reinterpret_cast<const float16_t *>(input_ptr + x));
            float16_t tmp;
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = std::abs(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = a * in + b;
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                    tmp = static_cast<float16_t>(1) / (static_cast<float16_t>(1) + std::exp(-in));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = std::max<float16_t>(static_cast<float16_t>(0), in);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = std::min<float16_t>(a, std::max(static_cast<float16_t>(0), in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = std::min<float16_t>(a, std::max<float16_t>(b, in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                    tmp = (in > 0) ? in : a * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    tmp = (in > soft_relu_thresh) ? in : std::log(static_cast<float16_t>(1) + std::exp(in));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
                    tmp = (in >= 0) ? in : a * (std::exp(in) - 1);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    tmp = std::sqrt(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = in * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
                    tmp = a * std::tanh(b * in);
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = in;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
                    tmp = in * ((std::min(std::max((in + 3), 0.0f), 6.0f)) * 0.166666667f);
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            *(output_ptr + x) = tmp;
        }
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute

#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */