/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ScaleKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/wrapper/wrapper.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/Utility.h" // for utility::clamp
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <limits> // for std::numeric_limits

namespace arm_compute
{
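// Validate the metadata of the quantize-down stage: the accumulator input must be S32, the
// requested clamping bounds must lie within the representable range of the output data type,
// an optional bias must be a 1D S32 vector matching the input's first dimension, and an
// already-initialized output must match the requested output data type and the input shape.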
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);

    ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)));
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
                                || output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound);

    // Check biases if they exist
    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
    }

    if(output->total_size() != 0)
    {
        if(output->data_type() != output_stage->output_data_type && (output_stage->output_data_type == DataType::QASYMM8 || output_stage->output_data_type == DataType::QASYMM8_SIGNED))
        {
            ARM_COMPUTE_RETURN_ERROR_MSG("Mismatching data types");
        }

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
    }

    return Status{};
}

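// First half of the requantization: add the result offset to four S32 vectors (16 lanes in
// total) and multiply each lane by the integer multiplier. The shift back down to the output
// range happens later, in finalize_quantization().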
inline void scale_input(int32x4x4_t &in_s32, int32x4_t result_offset_s32, int32_t result_mult_int)
{
    // Add the offset terms to GEMM's result
    in_s32.val[0] = vaddq_s32(in_s32.val[0], result_offset_s32);
    in_s32.val[1] = vaddq_s32(in_s32.val[1], result_offset_s32);
    in_s32.val[2] = vaddq_s32(in_s32.val[2], result_offset_s32);
    in_s32.val[3] = vaddq_s32(in_s32.val[3], result_offset_s32);

    // Multiply by result_mult_int
    in_s32.val[0] = vmulq_n_s32(in_s32.val[0], result_mult_int);
    in_s32.val[1] = vmulq_n_s32(in_s32.val[1], result_mult_int);
    in_s32.val[2] = vmulq_n_s32(in_s32.val[2], result_mult_int);
    in_s32.val[3] = vmulq_n_s32(in_s32.val[3], result_mult_int);
}

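// Saturating narrow from S16 to the requested 8-bit type, dispatched at compile time via
// SFINAE: the uint8_t overload uses a signed-to-unsigned saturating narrow (VQMOVUN), while
// the int8_t overload uses a signed saturating narrow (VQMOVN).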
template <typename T>
inline typename std::enable_if<std::is_same<T, uint8_t>::value,
                               typename wrapper::traits::neon_vector<T, 16>::type>::type
convert_to_8bit(const int16x8x2_t in_s16)
{
    return wrapper::vcombine(wrapper::vqmovun(in_s16.val[0]), wrapper::vqmovun(in_s16.val[1]));
}

template <typename T>
inline typename std::enable_if<std::is_same<T, int8_t>::value,
                               typename wrapper::traits::neon_vector<T, 16>::type>::type
convert_to_8bit(const int16x8x2_t in_s16)
{
    return wrapper::vcombine(wrapper::vqmovn(in_s16.val[0]), wrapper::vqmovn(in_s16.val[1]));
}

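// Second half of the requantization: apply the right shift, narrow with saturation down to
// 8 bits and clamp the result to [min, max]. Note that vshlq_s32 with a negative shift count
// performs an arithmetic shift right, which is why the caller passes -gemmlowp_shift.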
template <typename T>
inline typename wrapper::traits::neon_vector<T, 16>::type finalize_quantization(int32x4x4_t &in_s32, int32x4_t result_shift_s32, typename wrapper::traits::neon_vector<T, 16>::type min,
                                                                                typename wrapper::traits::neon_vector<T, 16>::type max)
{
    // Shift the final result (a negative shift value shifts right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8 or U8
    typename wrapper::traits::neon_vector<T, 16>::type out = convert_to_8bit<T>(in_s16);

    out = wrapper::vmax(out, min);
    out = wrapper::vmin(out, max);

    return out;
}

class Coordinates;

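// Per-thread execution: the X dimension of the window is collapsed so that the loop below can
// process 16 S32 accumulators per iteration, with a scalar loop handling the leftover
// elements. Each value is requantized as ((in [+ bias] + offset) * multiplier) >> shift and
// then clamped to the output range.
//
// Worked example of the scalar path with illustrative values (these numbers are assumptions,
// not taken from the library): with gemmlowp_offset = -100, gemmlowp_multiplier = 2,
// gemmlowp_shift = 8, an accumulator of 612 and no bias, we get (612 - 100) * 2 = 1024 and
// 1024 >> 8 = 4, which is then clamped to [clamp_min, clamp_max] and stored as 8-bit output.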
template <typename T>
void NEGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window)
{
    using VectorType = typename wrapper::traits::neon_vector<T, 16>::type;

    const int32x4_t result_offset_s32 = vdupq_n_s32(_output_stage->gemmlowp_offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(-_output_stage->gemmlowp_shift);
    const int       window_step_x     = 16;
    const auto      window_start_x    = static_cast<int>(window.x().start());
    const auto      window_end_x      = static_cast<int>(window.x().end());

    const int clamp_min = (_is_bounded_relu) ? _output_stage->gemmlowp_min_bound : std::numeric_limits<T>::lowest();
    const int clamp_max = (_is_bounded_relu) ? _output_stage->gemmlowp_max_bound : std::numeric_limits<T>::max();

    VectorType min = wrapper::vdup_n(static_cast<T>(clamp_min), wrapper::traits::vector_128_tag{});
    VectorType max = wrapper::vdup_n(static_cast<T>(clamp_max), wrapper::traits::vector_128_tag{});

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(_input, win);
    Iterator out(_output, win);

    if(_bias != nullptr)
    {
        Window win_biases;
        win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
        win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));

        Iterator bias(_bias, win_biases);
        execute_window_loop(win, [&](const Coordinates &)
        {
            // Compute 16 elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
                    }
                };

                const int32x4x4_t bias_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 12)
                    }
                };

                // Add the bias to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);

                // Add the offset terms to GEMM's result and multiply by result_mult_int
                scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);

                wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const int bias_value = *(reinterpret_cast<const int *>(bias.ptr()) + x);
                int       in_value   = *(reinterpret_cast<const int *>(in.ptr()) + x);

                // Quantize
                in_value = ((in_value + bias_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;

                // Store the result
                *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
            }
        },
        in, bias, out);
    }
    else
    {
        execute_window_loop(win, [&](const Coordinates &)
        {
            // Compute 16 elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result and multiply by result_mult_int
                scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);

                wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x);

                // Quantize
                in_value = ((in_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;

                // Store the result
                *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
            }
        },
        in, out);
    }
}

NEGEMMLowpQuantizeDownInt32ScaleKernel::NEGEMMLowpQuantizeDownInt32ScaleKernel()
    : _func(nullptr), _input(nullptr), _bias(nullptr), _output(nullptr), _output_stage(nullptr), _is_bounded_relu(false)
{
}

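// Configures the kernel: initializes the output tensor info if it is still empty, validates
// all arguments, stores the operand pointers, sets up a maximal execution window and selects
// the uint8_t or int8_t specialization of run() based on the requested output data type.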
void NEGEMMLowpQuantizeDownInt32ScaleKernel::configure(const ITensor *input, const ITensor *bias, ITensor *output, const GEMMLowpOutputStageInfo *output_stage)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, output_stage);

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_stage->output_data_type));

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
                                                  (bias != nullptr) ? bias->info() : nullptr,
                                                  output->info(),
                                                  output_stage));

    _input        = input;
    _bias         = bias;
    _output       = output;
    _output_stage = output_stage;

    // Configure kernel window
    Window win = calculate_max_window(*input->info(), Steps());
    Coordinates coord;
    coord.set_num_dimensions(output->info()->num_dimensions());
    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));

    INEKernel::configure(win);

    // Check if we need to clamp the result using min and max: clamping is skipped when the
    // bounds are equal or when they already span the full range of the output data type
    _is_bounded_relu = ((_output_stage->gemmlowp_min_bound != _output_stage->gemmlowp_max_bound)
                        && !(_output_stage->gemmlowp_min_bound == std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
                             && _output_stage->gemmlowp_max_bound == std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))));
    if(_output_stage->output_data_type == DataType::QASYMM8)
    {
        _func = &NEGEMMLowpQuantizeDownInt32ScaleKernel::run<uint8_t>;
    }
    else if(_output_stage->output_data_type == DataType::QASYMM8_SIGNED)
    {
        _func = &NEGEMMLowpQuantizeDownInt32ScaleKernel::run<int8_t>;
    }
    else
    {
        ARM_COMPUTE_ERROR("Data type not supported");
    }
}
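
// A minimal usage sketch (illustrative only: the tensor objects, their shapes and the stage
// parameters below are assumptions, not taken from this file). Note that the output stage
// info must outlive the kernel, since configure() stores a pointer to it.
//
//   GEMMLowpOutputStageInfo info{};
//   info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN;
//   info.gemmlowp_offset     = -100;
//   info.gemmlowp_multiplier = 2;
//   info.gemmlowp_shift      = 8;
//   info.gemmlowp_min_bound  = 0;
//   info.gemmlowp_max_bound  = 255;
//   info.output_data_type    = DataType::QASYMM8;
//
//   NEGEMMLowpQuantizeDownInt32ScaleKernel kernel;
//   kernel.configure(&gemm_result_s32, &bias_s32, &output_qasymm8, &info);
//   NEScheduler::get().schedule(&kernel, Window::DimY);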

Status NEGEMMLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, output_stage));

    return Status{};
}

void NEGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    (this->*_func)(window);
}
} // namespace arm_compute