/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/common/StdTypes.h"
#include "src/core/common/Validate.h"

#include <arm_neon.h>
#include <cmath>
#include <cstddef>

namespace arm_compute
{
namespace cpu
{
namespace
{
#ifndef __aarch64__
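// Keeps or zeroes individual float lanes by reinterpreting the vector as u32 and
// ANDing it with the mask. Used by the SQRT path below to handle zero inputs.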
inline float32x4_t mask_float_vector(const float32x4_t &in, const uint32x4_t &mask)
{
    auto int_in = vreinterpretq_u32_f32(in);
    return vreinterpretq_f32_u32(wrapper::vand(int_in, mask));
}
#endif /* __aarch64__ */
} // namespace

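/** Run a float32 activation function on the given window of @p src and write the results to @p dst.
 *
 * @param[in]  src      Source tensor.
 * @param[out] dst      Destination tensor.
 * @param[in]  act_info Activation function to use, plus its a/b parameters.
 * @param[in]  window   Region on which to execute the kernel.
 */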
void fp32_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    /** NEON vector tag type. */
    using ExactTagType = typename arm_compute::wrapper::traits::neon_bitvector_tag_t<float, wrapper::traits::BitWidth::W128>;

    constexpr int window_step_x  = 4;
    const auto    window_start_x = static_cast<int>(window.x().start());
    const auto    window_end_x   = static_cast<int>(window.x().end());
    const ActivationLayerInfo::ActivationFunction act = act_info.activation();

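    // Collapse the window where possible and handle the X dimension manually,
    // so the loops below control the split into vector and leftover elements.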
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

    // In case of non-aarch64, a small delta value is added to the input
    // to prevent NaN values caused by zeros in inputs to SQRT.
    // In case of aarch64, we call vsqrt directly, so we don't use delta.
#ifndef __aarch64__
    const auto delta = wrapper::vdup_n(static_cast<float>(1e-24), ExactTagType{});
#endif /* __aarch64__ */
    const auto const_1     = wrapper::vdup_n(static_cast<float>(1.f), ExactTagType{});
    const auto const_0     = wrapper::vdup_n(static_cast<float>(0.f), ExactTagType{});
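    // 6, 3 and 1/6 below are the constants of hard swish: x * relu6(x + 3) / 6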
    const auto const_6     = wrapper::vdup_n(static_cast<float>(6.f), ExactTagType{});
    const auto const_3     = wrapper::vdup_n(static_cast<float>(3.f), ExactTagType{});
    const auto const_inv_6 = wrapper::vdup_n(static_cast<float>(0.166666667f), ExactTagType{});

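    // Broadcast the a/b activation parameters for the vector loop and keep
    // scalar copies for the leftover loop.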
    const auto va = wrapper::vdup_n(static_cast<float>(act_info.a()), ExactTagType{});
    const auto vb = wrapper::vdup_n(static_cast<float>(act_info.b()), ExactTagType{});
    const auto a  = static_cast<float>(act_info.a());
    const auto b  = static_cast<float>(act_info.b());
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
        const auto output_ptr = reinterpret_cast<float *>(output.ptr());

        wrapper::traits::neon_bitvector_t<float, wrapper::traits::BitWidth::W128> tmp;

        // Compute window_step_x elements per iteration using full NEON vectors
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin = wrapper::vloadq(input_ptr + x);
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = wrapper::vabs(vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = wrapper::vmla(vb, va, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
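                    // Logistic (sigmoid): 1 / (1 + exp(-x))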
                    tmp = wrapper::vinv(wrapper::vadd(const_1, wrapper::vexpq(wrapper::vneg(vin))));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = wrapper::vmax(const_0, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(const_0, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(vb, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                    tmp = wrapper::vbsl(wrapper::vcgt(vin, const_0), vin, wrapper::vmul(va, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    tmp = wrapper::vlog(wrapper::vadd(const_1, wrapper::vexpq(vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
                    tmp = wrapper::vbsl(wrapper::vcge(vin, const_0), vin, wrapper::vmul(va, wrapper::vsub(wrapper::vexpq(vin), const_1)));
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
#ifdef __aarch64__
                    tmp = wrapper::vsqrt(vin);
#else  /* __aarch64__ */
                {
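                    // Add delta only to the lanes that are exactly zero, so vinvsqrt
                    // stays finite; those lanes are forced back to zero afterwards.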
                    const auto bitmask = wrapper::vceq(vin, wrapper::vdup_n(0.f, ExactTagType{}));
                    tmp = wrapper::vinv(wrapper::vinvsqrt(wrapper::vadd(vin, mask_float_vector(delta, bitmask))));
                    tmp = mask_float_vector(tmp, wrapper::vnot(bitmask));
                }
#endif /* __aarch64__ */
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = wrapper::vmul(vin, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
                    tmp = wrapper::vmul(va, wrapper::vtanh(wrapper::vmul(vb, vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = vin;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
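                    // Hard swish: x * relu6(x + 3) / 6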
                    tmp = wrapper::vmul(vin, wrapper::vmul(const_inv_6, wrapper::vmin(const_6, wrapper::vmax(const_0, wrapper::vadd(vin, const_3)))));
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            wrapper::vstore(output_ptr + x, tmp);
        }

        // Compute the left-over elements with scalar code when the window
        // width is not a multiple of window_step_x
        for(; x < window_end_x; ++x)
        {
            const float in = *(input_ptr + x);
            float tmp;
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = std::abs(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = a * in + b;
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                    tmp = static_cast<float>(1) / (static_cast<float>(1) + std::exp(-in));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = std::max<float>(static_cast<float>(0), in);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = std::min<float>(a, std::max(static_cast<float>(0), in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = std::min<float>(a, std::max<float>(b, in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                    tmp = (in > 0) ? in : a * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    tmp = std::log(static_cast<float>(1) + std::exp(in));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
                    tmp = (in >= 0) ? in : a * (std::exp(in) - 1);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    tmp = std::sqrt(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = in * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
                    tmp = a * std::tanh(b * in);
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = in;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
                    tmp = in * ((std::min(std::max((in + 3), 0.0f), 6.0f)) * 0.166666667f);
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            *(output_ptr + x) = tmp;
        }
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute