/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>

using namespace arm_compute;

namespace
{
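// The internal_* overloads below wrap the NEON/fixed-point intrinsics so that
// the templated accumulate_bias() can load, broadcast, add and store values
// without knowing the concrete element type: overload resolution on the
// pointer/vector type selects the right intrinsic at compile time.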
// Internal load
inline float32x4_t internal_vld1q(const float *in)
{
    return vld1q_f32(in);
}
inline qint8x16_t internal_vld1q(const qint8_t *in)
{
    return vld1q_qs8(in);
}
inline qint16x8_t internal_vld1q(const qint16_t *in)
{
    return vld1q_qs16(in);
}

// Internal store
inline void internal_vst1q(float *p, const float32x4_t &v)
{
    vst1q_f32(p, v);
}
inline void internal_vst1q(qint8_t *p, const qint8x16_t &v)
{
    vst1q_qs8(p, v);
}
inline void internal_vst1q(qint8_t *p, const qint16x8_t &v)
{
    // Narrow QS16 values back to QS8 with saturation before storing
    vst1_qs8(p, vqmovn_s16(v));
}
inline void internal_vst1q(qint16_t *p, const qint16x8_t &v)
{
    vst1q_qs16(p, v);
}

// Internal vdup
inline float32x4_t internal_vdupq_n(float v)
{
    return vdupq_n_f32(v);
}
inline qint8x16_t internal_vdupq_n(qint8_t v)
{
    return vdupq_n_qs8(v);
}
inline qint16x8_t internal_vdupq_n(qint16_t v)
{
    return vdupq_n_qs16(v);
}

// Internal add (saturating for the fixed-point types, plain vaddq for float)
inline float32x4_t internal_vqaddq(const float32x4_t &x, const float32x4_t &y)
{
    return vaddq_f32(x, y);
}
inline qint8x16_t internal_vqaddq(const qint8x16_t &x, const qint8x16_t &y)
{
    return vqaddq_qs8(x, y);
}
inline qint16x8_t internal_vqaddq(const qint16x8_t &x, const qint16x8_t &y)
{
    return vqaddq_qs16(x, y);
}

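// Adds one bias value per output feature map (the window's Z coordinate) to
// every element of that map. T1 is the input/accumulator element type, T2 the
// bias (and out-of-place output) element type; in_place selects whether the
// result is written back to the input tensor or to a separate output tensor.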
template <typename T1, typename T2, bool in_place>
void accumulate_bias(ITensor *input, const ITensor *bias, const Window window, ITensor *output)
{
    Iterator in(input, window);

    if(in_place) // In place accumulate
    {
        execute_window_loop(window, [&](const Coordinates & id)
        {
            // Get bias and pointer to input
            const auto in_ptr = reinterpret_cast<T1 *>(in.ptr());
            const auto vb     = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));

            // Accumulate bias
            internal_vst1q(in_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
        },
        in);
    }
    else // Out of place accumulate
    {
        Iterator out(output, window);
        execute_window_loop(window, [&](const Coordinates & id)
        {
            // Get bias and pointer to input
            const auto in_ptr  = reinterpret_cast<const T1 *>(in.ptr());
            const auto out_ptr = reinterpret_cast<T2 *>(out.ptr());
            const auto vb      = internal_vdupq_n(static_cast<T1>(*reinterpret_cast<const T2 *>(bias->ptr_to_element(Coordinates(id.z())))));

            // Accumulate bias
            internal_vst1q(out_ptr, internal_vqaddq(internal_vld1q(in_ptr), vb));
        },
        in, out);
    }
}
} // namespace

NEDirectConvolutionLayerBiasAccumulateKernel::NEDirectConvolutionLayerBiasAccumulateKernel()
    : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr)
{
}

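// Validates the input/bias/output type combination, sets up the execution
// window and padding requirements, and selects the matching accumulate_bias
// instantiation. A null output requests in-place accumulation into the input.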
void NEDirectConvolutionLayerBiasAccumulateKernel::configure(ITensor *input, const ITensor *bias, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::QS8, DataType::QS16, DataType::F32);
    ARM_COMPUTE_ERROR_ON(input->info()->fixed_point_position() != bias->info()->fixed_point_position());
    if(output != nullptr)
    {
        ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::QS16, DataType::F32);
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(bias, output);
        ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(bias, output);
    }
    ARM_COMPUTE_ERROR_ON(bias->info()->num_dimensions() > 1);

    _func   = nullptr;
    _bias   = bias;
    _input  = input;
    _output = output;

    // Process one 16-byte NEON register worth of elements per iteration
    const unsigned int num_elems_processed_per_iteration = 16 / element_size_from_data_type(input->info()->data_type());

    // Configure kernel window
    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
    AccessWindowStatic     bias_access(bias->info(), 0, 0, bias->info()->dimension(0), bias->info()->dimension(1));
    if(output != nullptr)
    {
        AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
        update_window_and_padding(win, input_access, output_access, bias_access);
        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
    }
    else
    {
        update_window_and_padding(win, input_access, bias_access);
        input_access.set_valid_region(win, ValidRegion(Coordinates(), input->info()->tensor_shape()));
    }
    INEKernel::configure(win);

    // Set appropriate function
    if(input->info()->data_type() == DataType::F32)
    {
        _func = (output == nullptr) ? &accumulate_bias<float, float, true> : &accumulate_bias<float, float, false>;
    }
    else if(input->info()->data_type() == DataType::QS8)
    {
        _func = (output == nullptr) ? &accumulate_bias<qint8_t, qint8_t, true> : &accumulate_bias<qint8_t, qint8_t, false>;
    }
    else if(input->info()->data_type() == DataType::QS16 && bias->info()->data_type() == DataType::QS8)
    {
        _func = (output == nullptr) ? &accumulate_bias<qint16_t, qint8_t, true> : &accumulate_bias<qint16_t, qint8_t, false>;
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported combination of types among the inputs.");
    }
}

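// Executes the selected accumulate_bias specialisation over the given window,
// which must be a valid sub-window of the one set up by configure().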
void NEDirectConvolutionLayerBiasAccumulateKernel::run(const Window &window)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    (*_func)(_input, _bias, window, _output);
}
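
// A minimal usage sketch (illustrative only, not part of this file): the
// tensor shapes, data type and the single-threaded kernel.run() call are
// assumptions for the example; production code would normally dispatch the
// kernel through the runtime scheduler instead.
//
//   Tensor conv_out{};
//   Tensor bias{};
//   conv_out.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::F32));
//   bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
//   conv_out.allocator()->allocate();
//   bias.allocator()->allocate();
//
//   NEDirectConvolutionLayerBiasAccumulateKernel kernel;
//   kernel.configure(&conv_out, &bias, nullptr); // nullptr output => accumulate in place
//   kernel.run(kernel.window());                 // run over the full configured window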
207}