blob: 66313593410782c13eb57631f7f508fc2bae8300 [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
2 * Copyright (c) 2017 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h"
25
26#include "arm_compute/core/AccessWindowStatic.h"
27#include "arm_compute/core/Error.h"
28#include "arm_compute/core/Helpers.h"
29#include "arm_compute/core/ITensor.h"
30#include "arm_compute/core/NEON/NEFixedPoint.h"
31#include "arm_compute/core/Types.h"
32#include "arm_compute/core/Validate.h"
33#include "arm_compute/core/Window.h"
34
35#include <arm_neon.h>
36#include <cstddef>
37#include <cstdint>
38
39using namespace arm_compute;
40
namespace
{
// -----------------------------------------------------------------------------
// Type-dispatched wrappers around the NEON intrinsics.
//
// accumulate_bias<T1, T2, in_place> below is written once against these four
// overload sets (load / store / broadcast-dup / saturating-add); the compiler
// selects the correct intrinsic per element type via overload resolution.
// The fixed-point variants (qint8/qint16/qint32) use the saturating q-ops so
// bias accumulation clamps instead of wrapping on overflow.
// -----------------------------------------------------------------------------

// Internal load: one full 128-bit vector from *in.
inline float32x4_t internal_vld1q(const float *in)
{
    return vld1q_f32(in);
}
inline qint8x16_t internal_vld1q(const qint8_t *in)
{
    return vld1q_qs8(in);
}
inline qint16x8_t internal_vld1q(const qint16_t *in)
{
    return vld1q_qs16(in);
}

inline qint32x4_t internal_vld1q(const qint32_t *in)
{
    return vld1q_s32(in);
}

// Internal store: write a vector to *p. The mixed-type overloads narrow with
// saturation (vqmovn) and therefore store only half a 128-bit vector (64 bits).
inline void internal_vst1q(float *p, const float32x4_t &v)
{
    vst1q_f32(p, v);
}
inline void internal_vst1q(qint8_t *p, const qint8x16_t &v)
{
    vst1q_qs8(p, v);
}
// Narrowing store: qint16x8 -> 8 saturated qint8 values (64-bit store).
inline void internal_vst1q(qint8_t *p, const qint16x8_t &v)
{
    vst1_qs8(p, vqmovn_s16(v));
}
inline void internal_vst1q(qint16_t *p, const qint16x8_t &v)
{
    vst1q_qs16(p, v);
}

inline void internal_vst1q(qint32_t *p, const qint32x4_t &v)
{
    vst1q_s32(p, v);
}

// Narrowing store: qint32x4 -> 4 saturated qint16 values (64-bit store).
inline void internal_vst1q(qint16_t *p, const qint32x4_t &v)
{
    vst1_qs16(p, vqmovn_qs32(v));
}

// Internal vdup: broadcast a single scalar (the bias value) to every lane.
inline float32x4_t internal_vdupq_n(float v)
{
    return vdupq_n_f32(v);
}
inline qint8x16_t internal_vdupq_n(qint8_t v)
{
    return vdupq_n_qs8(v);
}
inline qint16x8_t internal_vdupq_n(qint16_t v)
{
    return vdupq_n_qs16(v);
}

inline qint32x4_t internal_vdupq_n(qint32_t v)
{
    return vdupq_n_qs32(v);
}

// Internal vadd: saturating add for the fixed-point types, plain add for float
// (floating-point addition has no wrap-around to guard against).
inline float32x4_t internal_vqaddq(const float32x4_t &x, const float32x4_t &y)
{
    return vaddq_f32(x, y);
}
inline qint8x16_t internal_vqaddq(const qint8x16_t &x, const qint8x16_t &y)
{
    return vqaddq_qs8(x, y);
}
inline qint16x8_t internal_vqaddq(const qint16x8_t &x, const qint16x8_t &y)
{
    return vqaddq_qs16(x, y);
}
inline qint32x4_t internal_vqaddq(const qint32x4_t &x, const qint32x4_t &y)
{
    return vqaddq_qs32(x, y);
}

// Half-precision overloads, only when the target/toolchain supports FP16 NEON.
#ifdef ARM_COMPUTE_ENABLE_FP16
inline float16x8_t internal_vld1q(const float16_t *in)
{
    return vld1q_f16(in);
}
inline void internal_vst1q(float16_t *p, const float16x8_t &v)
{
    vst1q_f16(p, v);
}
inline float16x8_t internal_vdupq_n(float16_t v)
{
    return vdupq_n_f16(v);
}
inline float16x8_t internal_vqaddq(const float16x8_t &x, const float16x8_t &y)
{
    return vaddq_f16(x, y);
}
#endif /* ARM_COMPUTE_ENABLE_FP16 */

// Accumulate a per-channel bias into the accumulator tensor.
//
// T1       - element type of the accumulator (input) tensor.
// T2       - element type of the bias tensor (and of the output tensor in the
//            out-of-place case).
// in_place - when true the result is written back into `input` and `output`
//            is ignored; when false the result is written to `output`.
//
// The bias is indexed by the window's z coordinate, i.e. one bias value per
// output feature map, broadcast across the whole plane.
// NOTE(review): `window` is taken by value rather than const reference —
// presumably to match the _func pointer type declared in the header; confirm
// before changing.
template <typename T1, typename T2, bool in_place>
void accumulate_bias(ITensor *input, const ITensor *bias, const Window window, ITensor *output)
{
    Iterator in(input, window);

    if(in_place) // In place accumulate
    {
        execute_window_loop(window, [&](const Coordinates & id)
        {
            // Get bias and pointer to input
            const auto in_ptr = reinterpret_cast<T1 *>(in.ptr());
            const auto vb     = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));

            // Accumulate bias
            internal_vst1q(in_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
        },
        in);
    }
    else // Out of place accumulate
    {
        Iterator out(output, window);
        execute_window_loop(window, [&](const Coordinates & id)
        {
            // Get bias and pointer to input
            const auto in_ptr  = reinterpret_cast<const T1 *>(in.ptr());
            const auto out_ptr = reinterpret_cast<T2 *>(out.ptr());
            const auto vb      = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));

            // Accumulate bias
            internal_vst1q(out_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
        },
        in, out);
    }
}
} // namespace
181
// Default-construct with no function selected and no tensors attached;
// configure() must be called before run().
NEDirectConvolutionLayerBiasAccumulateKernel::NEDirectConvolutionLayerBiasAccumulateKernel()
    : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr)
{
}
186
187void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, const ITensor *bias, ITensor *output)
188{
Pablo Tellof87cc7f2017-07-26 10:28:40 +0100189 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::F32);
190 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::QS32, DataType::F32);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100191 ARM_COMPUTE_ERROR_ON(input->info()->fixed_point_position() != bias->info()->fixed_point_position());
192 if(output != nullptr)
193 {
194 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::QS16, DataType::F32);
195 ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(bias, output);
196 ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(bias, output);
197 }
198 ARM_COMPUTE_ERROR_ON(bias->info()->num_dimensions() > 1);
199
200 _func = nullptr;
201 _bias = bias;
202 _input = input;
203 _output = output;
204
205 const unsigned int num_elems_processed_per_iteration = 16 / element_size_from_data_type(input->info()->data_type());
206
207 // Configure kernel window
208 Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
209 AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
210 AccessWindowStatic bias_access(bias->info(), 0, 0, bias->info()->dimension(0), bias->info()->dimension(1));
211 if(output != nullptr)
212 {
213 AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
214 update_window_and_padding(win, input_access, output_access, bias_access);
215 output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
216 }
217 else
218 {
219 update_window_and_padding(win, input_access, bias_access);
220 input_access.set_valid_region(win, ValidRegion(Coordinates(), input->info()->tensor_shape()));
221 }
222 INEKernel::configure(win);
223
224 // Set appropriate function
Pablo Tellof87cc7f2017-07-26 10:28:40 +0100225 switch(input->info()->data_type())
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100226 {
Pablo Tellof87cc7f2017-07-26 10:28:40 +0100227 case DataType::QS8:
228 {
229 _func = (output == nullptr) ? &accumulate_bias<qint8_t, qint8_t, true> : &accumulate_bias<qint8_t, qint8_t, false>;
230 break;
231 }
232 case DataType::QS16:
233 {
234 if(bias->info()->data_type() == DataType::QS8)
235 {
236 _func = (output == nullptr) ? &accumulate_bias<qint16_t, qint8_t, true> : &accumulate_bias<qint16_t, qint8_t, false>;
237 }
238 else
239 {
240 ARM_COMPUTE_ERROR("Not implemented");
241 }
242 break;
243 }
244 case DataType::QS32:
245 {
246 _func = (output == nullptr) ? &accumulate_bias<qint32_t, qint16_t, true> : &accumulate_bias<qint32_t, qint16_t, false>;
247 break;
248 }
Pablo Tello0d176142017-07-06 16:43:14 +0100249#ifdef ARM_COMPUTE_ENABLE_FP16
Pablo Tellof87cc7f2017-07-26 10:28:40 +0100250 case DataType::F16:
251 {
252 _func = (output == nullptr) ? &accumulate_bias<float16_t, float16_t, true> : &accumulate_bias<float16_t, float16_t, false>;
253 break;
254 }
Pablo Tello0d176142017-07-06 16:43:14 +0100255#endif /* ARM_COMPUTE_ENABLE_FP16 */
Pablo Tellof87cc7f2017-07-26 10:28:40 +0100256 case DataType::F32:
257 {
258 _func = (output == nullptr) ? &accumulate_bias<float, float, true> : &accumulate_bias<float, float, false>;
259 break;
260 }
261 default:
262 {
263 ARM_COMPUTE_ERROR("Unsupported combination of types among the inputs.");
264 break;
265 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100266 }
267}
268
// Execute the kernel on the given (sub-)window by dispatching to the typed
// accumulate_bias instantiation selected in configure().
// window must be a valid sub-window of the window this kernel was configured
// with; info is unused here (no per-thread state is needed).
void NEDirectConvolutionLayerBiasAccumulateKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    (*_func)(_input, _bias, window, _output);
}