/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
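/** Accumulate a vector of input values into running sum and sum-of-squares accumulators.
 *
 * @p AccType may be wider than @p InputType so the reduction can be carried out at higher precision.
 */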
template <typename InputType, typename AccType = InputType>
void vector_float_sum(AccType &result, AccType &result_square, const InputType &inputs)
{
    result        = wrapper::vadd(result, inputs);
    result_square = wrapper::vadd(result_square, wrapper::vmul(inputs, inputs));
}

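// Mixed-precision FP16 specialization: widen each half of the FP16 vector to FP32 and accumulate in single precision.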
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
inline void vector_float_sum(float32x4_t &result, float32x4_t &result_square, const float16x8_t &inputs)
{
    vector_float_sum(result, result_square, wrapper::vcvt<float>(wrapper::vgetlow(inputs)));
    vector_float_sum(result, result_square, wrapper::vcvt<float>(wrapper::vgethigh(inputs)));
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

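/** Normalize a vector of input values: (input - mean) * multiplier + beta. */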
template <typename InputType, typename AccType = InputType>
InputType vector_float_norm(const InputType &inputs, const AccType &vec_mean, const AccType &vec_multip, const AccType &vec_beta)
{
    return wrapper::vadd(wrapper::vmul(wrapper::vsub(inputs, vec_mean), vec_multip), vec_beta);
}

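// Mixed-precision FP16 specialization: normalize each half in FP32 and narrow the results back to FP16.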
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
inline float16x8_t vector_float_norm(const float16x8_t &inputs, const float32x4_t &vec_mean, const float32x4_t &vec_multip, const float32x4_t &vec_beta)
{
    const auto  input_low   = wrapper::vcvt<float>(wrapper::vgetlow(inputs));
    const auto  input_high  = wrapper::vcvt<float>(wrapper::vgethigh(inputs));
    const auto  result_low  = wrapper::vcvt<float16_t>(vector_float_norm(input_low, vec_mean, vec_multip, vec_beta));
    const auto  result_high = wrapper::vcvt<float16_t>(vector_float_norm(input_high, vec_mean, vec_multip, vec_beta));
    float16x8_t result      = wrapper::vcombine(result_low, result_high);

    return result;
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

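/** Instance-normalize an NCHW tensor: each H x W plane is normalized independently with its own statistics.
 *
 * out = (in - mean_plane) * gamma / sqrt(var_plane + epsilon) + beta
 *
 * @p AccType is the type used to accumulate the statistics (FP32 when mixed precision is requested for FP16 data).
 */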
template <typename T, typename AccType = T>
void instance_normalization_nchw(ITensor *input, ITensor *output, float gamma, float beta, float epsilon, const Window &window)
{
    /** SIMD vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;

    // Clear X/Y dimensions on execution window as we handle the planes manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));
    win.set(Window::DimY, Window::Dimension(0, 1, 1));

    constexpr int      window_step_x  = 16 / sizeof(T);
    const unsigned int elements_plane = input->info()->dimension(0) * output->info()->dimension(1);

    Iterator input_it(input, win);
    execute_window_loop(win, [&](const Coordinates & id)
    {
        Window win_plane = window;
        win_plane.set(Window::DimX, Window::Dimension(0, 1, 1));
        win_plane.set(Window::DimZ, Window::Dimension(id[2], id[2] + 1, 1));
        win_plane.set(3, Window::Dimension(id[3], id[3] + 1, 1));

        Iterator input_plane_it(input, win_plane);
        Iterator output_plane_it(output, win_plane);

        auto sum_h_w         = static_cast<AccType>(0.f);
        auto sum_squares_h_w = static_cast<AccType>(0.f);

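        // First pass: accumulate the sum and the sum of squares of the plane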
        execute_window_loop(win_plane, [&](const Coordinates &)
        {
            const auto input_ptr = reinterpret_cast<const T *>(input_plane_it.ptr());

            auto vec_sum_h_w         = wrapper::vdup_n(static_cast<AccType>(0.f), ExactTagType{});
            auto vec_sum_squares_h_w = wrapper::vdup_n(static_cast<AccType>(0.f), ExactTagType{});

            // Compute S elements per iteration
            int x = window.x().start();
            for(; x <= (window.x().end() - window_step_x); x += window_step_x)
            {
                auto vec_input_val = wrapper::vloadq(input_ptr + x);
                vector_float_sum(vec_sum_h_w, vec_sum_squares_h_w, vec_input_val);
            }

            auto vec2_sum_h_w         = wrapper::vpadd(wrapper::vgethigh(vec_sum_h_w), wrapper::vgetlow(vec_sum_h_w));
            auto vec2_sum_squares_h_w = wrapper::vpadd(wrapper::vgethigh(vec_sum_squares_h_w), wrapper::vgetlow(vec_sum_squares_h_w));

            vec2_sum_h_w         = wrapper::vpadd(vec2_sum_h_w, vec2_sum_h_w);
            vec2_sum_squares_h_w = wrapper::vpadd(vec2_sum_squares_h_w, vec2_sum_squares_h_w);

            sum_h_w += wrapper::vgetlane(vec2_sum_h_w, 0);
            sum_squares_h_w += wrapper::vgetlane(vec2_sum_squares_h_w, 0);

            // Compute left-over elements
            for(; x < window.x().end(); ++x)
            {
                const auto value = static_cast<AccType>(*(input_ptr + x));
                sum_h_w += value;
                sum_squares_h_w += value * value;
            }
        },
        input_plane_it, output_plane_it);

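        // Derive the plane statistics and the normalization factor, then broadcast them for the second pass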
        const auto mean_h_w = sum_h_w / elements_plane;
        const auto var_h_w  = sum_squares_h_w / elements_plane - mean_h_w * mean_h_w;

        const auto multip_h_w     = gamma / std::sqrt(var_h_w + epsilon);
        const auto vec_mean_h_w   = wrapper::vdup_n(static_cast<AccType>(mean_h_w), ExactTagType{});
        const auto vec_multip_h_w = wrapper::vdup_n(static_cast<AccType>(multip_h_w), ExactTagType{});
        const auto vec_beta       = wrapper::vdup_n(static_cast<AccType>(beta), ExactTagType{});

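        // Second pass: normalize the plane using the statistics computed above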
        execute_window_loop(win_plane, [&](const Coordinates &)
        {
            auto input_ptr  = reinterpret_cast<T *>(input_plane_it.ptr());
            auto output_ptr = reinterpret_cast<T *>(output_plane_it.ptr());

            // Compute S elements per iteration
            int x = window.x().start();
            for(; x <= (window.x().end() - window_step_x); x += window_step_x)
            {
                const auto vec_val        = wrapper::vloadq(input_ptr + x);
                const auto normalized_vec = vector_float_norm(vec_val, vec_mean_h_w, vec_multip_h_w, vec_beta);
                wrapper::vstore(output_ptr + x, normalized_vec);
            }

            // Compute left-over elements
            for(; x < window.x().end(); ++x)
            {
                const auto val    = static_cast<AccType>(*(input_ptr + x));
                *(output_ptr + x) = static_cast<T>((val - mean_h_w) * multip_h_w + beta);
            }
        },
        input_plane_it, output_plane_it);
    },
    input_it);
}

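// Sanity checks for the kernel: F16/F32 data only, NHWC layout rejected, matching input/output metadata, non-zero epsilon.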
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon)
{
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_UNUSED(gamma);
    ARM_COMPUTE_UNUSED(beta);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(epsilon == 0.f, "Epsilon must be different than 0");

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() == DataLayout::NHWC, "NHWC data layout is not supported by the kernel directly");

    if(output != nullptr && output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
    }
    return Status{};
}

std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
{
    // We handle the planes manually
    Window win = calculate_max_window(*input, Steps(1));

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());

    // NEInstanceNormalizationLayerKernel doesn't need padding so update_window_and_padding() can be skipped
    return std::make_pair(Status{}, win);
}
} // namespace

NEInstanceNormalizationLayerKernel::NEInstanceNormalizationLayerKernel()
    : _func(nullptr), _input(nullptr), _output(nullptr), _gamma(1), _beta(0), _epsilon(1e-12)
{
}

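// A minimal usage sketch (illustrative only, not part of this kernel): configure() binds the tensors and
// parameters and selects the per-type implementation; the kernel can then be scheduled over the Z dimension.
// `src` and `dst` stand for already-allocated NCHW tensors of a supported type.
//
//   NEInstanceNormalizationLayerKernel kernel;
//   kernel.configure(&src, &dst, InstanceNormalizationLayerKernelInfo(1.f /* gamma */, 0.f /* beta */, 1e-12f /* epsilon */, true /* use_mixed_precision */));
//   NEScheduler::get().schedule(&kernel, Window::DimZ);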
void NEInstanceNormalizationLayerKernel::configure(ITensor *input, ITensor *output, const InstanceNormalizationLayerKernelInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);

    _input               = input;
    _output              = output == nullptr ? input : output;
    _gamma               = info.gamma;
    _beta                = info.beta;
    _epsilon             = info.epsilon;
    _use_mixed_precision = info.use_mixed_precision;

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(), _gamma, _beta, _epsilon));

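    // Select the implementation based on the data type and the requested accumulation precision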
    if(_input->info()->data_type() == DataType::F32)
    {
        _func = &instance_normalization_nchw<float>;
    }
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    else if(_input->info()->data_type() == DataType::F16)
    {
        if(_use_mixed_precision)
        {
            _func = &instance_normalization_nchw<float16_t, float>;
        }
        else
        {
            _func = &instance_normalization_nchw<float16_t>;
        }
    }
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    else
    {
        ARM_COMPUTE_ERROR("Unsupported data type");
    }

    // Configure kernel window
    auto win_config = validate_and_configure_window(_input->info(), _output->info());
    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));

    INEKernel::configure(std::get<1>(win_config));
}

Status NEInstanceNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info.gamma, info.beta, info.epsilon));
    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (output == nullptr ? input->clone().get() : output->clone().get()))));
    return Status{};
}

void NEInstanceNormalizationLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    (*_func)(_input, _output, _gamma, _beta, _epsilon, window);
}
} // namespace arm_compute