blob: 8b3f2383abe6d0770b3a6bbd7e10b609d5918755 [file] [log] [blame]
/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
25
26#include "arm_compute/core/AccessWindowStatic.h"
27#include "arm_compute/core/Error.h"
28#include "arm_compute/core/Helpers.h"
29#include "arm_compute/core/ITensor.h"
30#include "arm_compute/core/NEON/NEAsymm.h"
31#include "arm_compute/core/Types.h"
32#include "arm_compute/core/Utils.h"
33#include "arm_compute/core/Validate.h"
34#include "arm_compute/core/Window.h"
35
36#include <arm_neon.h>
37#include <cstddef>
38#include <cstdint>
39
40using namespace arm_compute;
41
42namespace
43{
// Validates the static argument combination for this kernel.
//
// @param input  S32 GEMMLowp accumulator tensor info
// @param bias   Optional bias tensor info (may be nullptr); must match the input's
//               data type, be at most 1D and have the same width as the input
// @param output Quantized output tensor info; checked only if already initialized,
//               in which case it must be QASYMM8 and shape-match the input
// @param min    Lower clamp bound; must satisfy 0 <= min <= max
// @param max    Upper clamp bound; must be <= 255 (U8 range)
//
// @return An empty Status on success, an error Status describing the failure otherwise
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(max > 255);
    ARM_COMPUTE_RETURN_ERROR_ON(min < 0 || min > max);

    // Check biases if exist
    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
    }

    // Output checks only apply once the output has been initialized (auto-init may not have run yet)
    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
    }

    return Status{};
}
66
Georgios Pinitas631c41a2017-12-06 11:53:03 +000067std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
Gian Marco58c57942017-11-28 09:10:03 +000068{
Gian Marco7f0f7902017-12-07 09:26:56 +000069 // Note: This kernel performs 16 elements per iteration.
70 // However, since we use a left-over for loop, we cannot have any read or write out of memory
71 // For this reason num_elems_processed_per_iteration is set to 1
72 constexpr unsigned int num_elems_processed_per_iteration = 1;
Gian Marco58c57942017-11-28 09:10:03 +000073
74 // Configure kernel window
75 Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
76
77 AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
Gian Marco58c57942017-11-28 09:10:03 +000078
79 bool window_changed = update_window_and_padding(win,
Chunosov5124be52017-11-22 20:42:13 +070080 input_access);
81
82 if(output->total_size() != 0)
83 {
84 AccessWindowHorizontal output_result_access(output, 0, num_elems_processed_per_iteration);
85 window_changed = window_changed || update_window_and_padding(win, output_result_access);
86
87 output_result_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
88 }
Gian Marco58c57942017-11-28 09:10:03 +000089
90 if(bias != nullptr)
91 {
Gian Marco7f0f7902017-12-07 09:26:56 +000092 AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1));
Gian Marco58c57942017-11-28 09:10:03 +000093 window_changed = window_changed || update_window_and_padding(win, bias_access);
94 }
95
Georgios Pinitas631c41a2017-12-06 11:53:03 +000096 Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
Gian Marco58c57942017-11-28 09:10:03 +000097 return std::make_pair(err, win);
98}
99
// Quantizes sixteen S32 accumulators down to sixteen U8 values.
//
// Pipeline per 4-lane sub-vector: saturating rounding doubling multiply-high by
// the fixed-point multiplier, rounded right shift, add the output offset,
// clamp negatives to zero, then narrow S32 -> S16 -> U8 with saturation.
// When is_bounded_relu is true the result is additionally clamped to [min_u8, max_u8].
template <bool is_bounded_relu>
inline uint8x16_t finalize_quantization(int32x4x4_t &in_s32, int result_fixedpoint_multiplier, int32_t result_shift, int32x4_t result_offset_after_shift_s32, uint8x16_t min_u8,
                                        uint8x16_t max_u8)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Apply the identical fixed-point pipeline to each of the four sub-vectors
    for(int i = 0; i < 4; ++i)
    {
        // Fixed point multiplication with vector saturating rounding doubling multiply high with scalar
        in_s32.val[i] = vqrdmulhq_n_s32(in_s32.val[i], result_fixedpoint_multiplier);
        // Round to the nearest division by a power-of-two using result_shift
        in_s32.val[i] = rounding_divide_by_pow2(in_s32.val[i], result_shift);
        // Add the offset term
        in_s32.val[i] = vaddq_s32(in_s32.val[i], result_offset_after_shift_s32);
        // Saturate negative values
        in_s32.val[i] = vmaxq_s32(in_s32.val[i], zero_s32);
    }

    // Narrow S32 -> S16 (two 8-lane halves)
    const int16x8_t low_s16  = vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1]));
    const int16x8_t high_s16 = vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]));

    // Narrow S16 -> U8 with unsigned saturation
    uint8x16_t result_u8 = vcombine_u8(vqmovun_s16(low_s16), vqmovun_s16(high_s16));

    // Optional bounded ReLU clamp
    if(is_bounded_relu)
    {
        result_u8 = vmaxq_u8(result_u8, min_u8);
        result_u8 = vminq_u8(result_u8, max_u8);
    }

    return result_u8;
}
Gian Marco7f0f7902017-12-07 09:26:56 +0000150
/* Scalar variant used by the left-over for loop: quantizes a single S32 value
 * (pre-splatted across all four vector lanes) down to one U8 value.
 * The NEON intrinsics are kept so the rounding/saturation semantics match the
 * sixteen-element variant exactly. */
template <bool is_bounded_relu>
inline uint8_t finalize_quantization(int32x4_t in_s32, int result_fixedpoint_multiplier, int32_t result_shift, int32x4_t result_offset_after_shift_s32, uint8_t min_u8, uint8_t max_u8)
{
    const static int32x4_t zero_s32      = vdupq_n_s32(0);
    const static int32x4_t sat_value_s32 = vdupq_n_s32(255);

    // Saturating rounding doubling multiply-high by the fixed-point multiplier
    in_s32 = vqrdmulhq_n_s32(in_s32, result_fixedpoint_multiplier);

    // Rounded division by 2^result_shift
    in_s32 = rounding_divide_by_pow2(in_s32, result_shift);

    // Add the output offset, then clamp into the representable U8 range [0, 255]
    in_s32 = vaddq_s32(in_s32, result_offset_after_shift_s32);
    in_s32 = vminq_s32(vmaxq_s32(in_s32, zero_s32), sat_value_s32);

    // All lanes hold the same value; extract lane 0
    auto result_u8 = static_cast<uint8_t>(vgetq_lane_s32(in_s32, 0));

    // Optional bounded ReLU clamp
    if(is_bounded_relu)
    {
        result_u8 = std::min(std::max(result_u8, min_u8), max_u8);
    }

    return result_u8;
}
Gian Marco58c57942017-11-28 09:10:03 +0000181} // namespace
182
namespace arm_compute
{
// Forward declaration only: Coordinates is referenced by the execute_window_loop lambdas below
class Coordinates;
} // namespace arm_compute
187
// Templated worker selected at configure() time through the _func pointer.
// Quantizes the S32 GEMMLowp accumulators of _input (plus optional _bias) down to
// QASYMM8 in _output. The template parameter bakes in, at compile time, whether the
// final bounded-ReLU clamp to [_min, _max] is applied.
template <bool is_bounded_relu>
void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window &window)
{
    const int32x4_t result_offset_after_shift_s32 = vdupq_n_s32(_result_offset_after_shift);
    const uint8x16_t min_u8 = vdupq_n_u8(static_cast<uint8_t>(_min));
    const uint8x16_t max_u8 = vdupq_n_u8(static_cast<uint8_t>(_max));

    // min_u8/max_u8 are only read when is_bounded_relu is true; silence the
    // unused-variable warning in the non-clamping instantiation
    ARM_COMPUTE_UNUSED(min_u8);
    ARM_COMPUTE_UNUSED(max_u8);

    // 16 elements per vector iteration; the scalar loop below mops up the remainder
    const int window_step_x = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x = static_cast<int>(window.x().end());

    // Collapse the X dimension of the window: X is stepped manually via `x` below,
    // so the iterators must not also advance along it
    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(_input, win);
    Iterator out(_output, win);

    if(_bias != nullptr)
    {
        // The bias is a 1D vector broadcast across rows: pin both X and Y so
        // bias.ptr() always points at the start of the vector and is indexed by x
        Window win_biases;
        win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
        win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));

        Iterator bias(_bias, win_biases);
        execute_window_loop(win, [&](const Coordinates & id)
        {
            // Compute 16 elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
                    }
                };

                const int32x4x4_t bias_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 12)
                    }
                };

                // Add the bias to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);

                vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias.ptr()) + x);
                int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);

                // Add bias
                in_value += bias_value;

                // Finalize and store the result; the scalar overload expects the value
                // splatted across all vector lanes
                *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(vdupq_n_s32(in_value), _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min),
                                                                          static_cast<uint8_t>(_max));
            }
        },
        in, bias, out);
    }
    else
    {
        execute_window_loop(win, [&](const Coordinates & id)
        {
            // Compute 16 elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
                    }
                };

                vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                // Load-and-duplicate the single value into all lanes for the scalar overload
                const int32x4_t in_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x);

                // Finalize and store the result
                *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max));
            }
        },
        in, out);
    }
}
299
// Default-constructs the kernel in an unconfigured state: all tensor pointers null,
// quantization parameters zeroed and no worker function selected.
// configure() must be called before run().
NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel()
    : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr), _result_fixedpoint_multiplier(0), _result_shift(0), _result_offset_after_shift(0), _min(0), _max(0)
{
}
304
305void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift,
306 int result_offset_after_shift, int min, int max)
307{
308 // Perform validate step
309 ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
310
311 // Output auto inizialitation if not yet initialized
312 auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(DataType::QASYMM8));
313
314 ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
315 (bias != nullptr) ? bias->info() : nullptr,
316 output->info(),
317 min,
318 max));
319
320 _input = input;
321 _bias = bias;
322 _output = output;
323 _result_fixedpoint_multiplier = result_fixedpoint_multiplier;
324 _result_shift = result_shift;
325 _result_offset_after_shift = result_offset_after_shift;
326 _min = min;
327 _max = max;
328
329 // Configure kernel window
330 auto win_config = validate_and_configure_window(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info());
331 ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
332 INEKernel::configure(win_config.second);
333
334 // Check if we need to clamp the result using min and max
335 const bool is_bounded_relu = ((min != max) && !(min == 0 && max == 255));
336 _func = is_bounded_relu ? &NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run<true> : &NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run<false>;
337}
338
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000339Status NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
Gian Marco58c57942017-11-28 09:10:03 +0000340{
Chunosov5124be52017-11-22 20:42:13 +0700341 ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
Gian Marco58c57942017-11-28 09:10:03 +0000342 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, min, max));
343 ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
344 (bias != nullptr) ? bias->clone().get() : nullptr,
345 output->clone().get())
346 .first);
347
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000348 return Status{};
Gian Marco58c57942017-11-28 09:10:03 +0000349}
350
// Scheduler entry point: validates the sub-window and dispatches to the templated
// worker (bounded-ReLU or pass-through variant) selected in configure().
void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    // _func is guaranteed non-null once the kernel has been configured
    (this->*_func)(window);
}