/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/NEAsymm.h"
#include "arm_compute/core/NEON/wrapper/wrapper.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include <arm_neon.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>

namespace arm_compute
{
class Coordinates;

namespace
{
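// Loads 16 consecutive S32 GEMM accumulators starting at element x of the row pointed to by mm_result_it.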
inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
{
    return
    {
        {
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
        }
    };
}

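// Loads 16 consecutive S32 values from ptr + x into four int32x4_t registers.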
inline int32x4x4_t load(const int32_t *ptr, int32_t x)
{
    return
    {
        {
            vld1q_s32(ptr + x + 0),
            vld1q_s32(ptr + x + 4),
            vld1q_s32(ptr + x + 8),
            vld1q_s32(ptr + x + 12)
        }
    };
}

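// Computes the matrix A offset contribution for 16 elements: vector_sum_col[x .. x + 15] * a_offset.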
inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
{
    int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);

    a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
    a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
    a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
    a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
    return a_offset_term_s32;
}

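// Computes the matrix B offset contribution for the current row, vector_sum_row[y] * b_offset, broadcast across a vector.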
inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
{
    int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
    b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, b_offset);
    return b_offset_term_s32;
}

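// Broadcasts the constant cross-term k_offset (= a_offset * b_offset * k) across four vectors.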
inline int32x4x4_t get_k_offset(int32_t k_offset)
{
    return
    {
        {
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset)
        }
    };
}

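// Quantizes down 16 S32 accumulators to QASYMM8 for the QUANTIZE_DOWN (integer) output stage:
// shifts right by the result shift, clamps negatives to zero, saturating-narrows to U8 and,
// when is_bounded_relu is set, clamps the result to [min_u8, max_u8].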
template <bool is_bounded_relu>
inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8)
{
    static const int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (a negative shift value shifts right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to U8
    uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_u8 = vmaxq_u8(out_u8, min_u8);
        out_u8 = vminq_u8(out_u8, max_u8);
    }

    return out_u8;
}

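// Window/iterator helpers: the reduction vectors and the bias are 1D, so their iterators
// must not advance along the Y/Z dimensions of the execution window.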
inline Window get_win_vector_sum(const Window &window)
{
    Window win_vector_sum(window);
    win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
    return win_vector_sum;
}

inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
{
    Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
    return vector_sum_col_it;
}

inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
{
    Window win_vector_sum_row = get_win_vector_sum(window);
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
    return vector_sum_row_it;
}

inline Iterator get_bias_it(const Window &window, const ITensor *bias)
{
    Window win_bias(window);
    win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
    win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
    Iterator bias_it(bias, win_bias);
    return bias_it;
}

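// Element-wise S32 additions used to accumulate the offset and bias contributions.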
inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b),
            vaddq_s32(a.val[1], b),
            vaddq_s32(a.val[2], b),
            vaddq_s32(a.val[3], b)
        }
    };
}

inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b.val[0]),
            vaddq_s32(a.val[1], b.val[1]),
            vaddq_s32(a.val[2], b.val[2]),
            vaddq_s32(a.val[3], b.val[3])
        }
    };
}

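// Multiplies each lane of four S32 vectors by a scalar (the QUANTIZE_DOWN result multiplier).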
inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
{
    return
    {
        {
            vmulq_n_s32(a.val[0], mul_scalar),
            vmulq_n_s32(a.val[1], mul_scalar),
            vmulq_n_s32(a.val[2], mul_scalar),
            vmulq_n_s32(a.val[3], mul_scalar)
        }
    };
}

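// Processes one row of the window: adds the offset contributions (vector_sum_col[x] * a_offset,
// vector_sum_row[y] * b_offset and the constant a_offset * b_offset * k term) plus the optional
// bias to each S32 accumulator, then quantizes the result down to QASYMM8. The template flags
// select at compile time which contributions are applied and whether the fixed-point
// (QUANTIZE_DOWN_FIXEDPOINT) or integer (QUANTIZE_DOWN) output stage is used.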
template <bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point>
inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                        const int32x4_t result_offset_s32, const int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8,
                                                        int32_t a_offset, int32_t b_offset, int32_t k_offset,
                                                        GEMMLowpOutputStageInfo output_stage, int window_step_x, int window_start_x, int window_end_x)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
    }
    if(has_a_offset && has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
    }
    if(has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point || has_b_offset)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
        }
        if(!is_fixed_point)
        {
            in_s32 = mul_s32(in_s32, output_stage.gemmlowp_multiplier);
        }

        if(is_fixed_point)
        {
            vst1q_u8(out_it.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift, result_offset_s32, min_u8, max_u8));
        }
        else
        {
            vst1q_u8(out_it.ptr() + x, finalize_quantization_floating_point<is_bounded_relu>(in_s32, result_shift_s32, min_u8, max_u8));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *(out_it.ptr() + x) = finalize_quantization<is_bounded_relu>(in_value, output_stage.gemmlowp_multiplier, output_stage.gemmlowp_shift,
                                                                         output_stage.gemmlowp_offset, static_cast<uint8_t>(output_stage.gemmlowp_min_bound), static_cast<uint8_t>(output_stage.gemmlowp_max_bound));
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * output_stage.gemmlowp_multiplier) >> output_stage.gemmlowp_shift;

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<uint8_t>(std::max<int32_t>(output_stage.gemmlowp_min_bound, std::min<int32_t>(output_stage.gemmlowp_max_bound, in_value)));
            }
            *(out_it.ptr() + x) = static_cast<uint8_t>(std::max<int32_t>(0, std::min<int32_t>(255, in_value)));
        }
    }
}

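// Walks the (possibly Z-collapsed) execution window and forwards each row to the matching
// specialization of run_offset_contribution_output_stage_window, depending on which of the
// a_offset/b_offset contributions and the bias are present. A zero offset means the
// corresponding reduction vector is not needed at all.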
template <bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point>
void run_offset_contribution_output_stage(const Window &window,
                                          const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                          int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                          GEMMLowpOutputStageInfo output_stage)
{
    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32x4_t  result_offset_s32 = vdupq_n_s32(output_stage.gemmlowp_offset);
    const int32x4_t  result_shift_s32  = vdupq_n_s32(is_fixed_point ? output_stage.gemmlowp_shift : -output_stage.gemmlowp_shift);
    const uint8x16_t min_u8            = vdupq_n_u8(static_cast<uint8_t>(output_stage.gemmlowp_min_bound));
    const uint8x16_t max_u8            = vdupq_n_u8(static_cast<uint8_t>(output_stage.gemmlowp_max_bound));

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if((a_offset != 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<true, true, true, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                                                               mm_result_it, out_it,
                                                                                                               result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                               output_stage, window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<true, true, false, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                                                                result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                                output_stage, window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset == 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<false, true, true, is_bounded_relu, is_fixed_point>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                                                                mm_result_it, out_it,
                                                                                                                result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                                output_stage, window_step_x, window_start_x, window_end_x);
            },
            vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<false, true, false, is_bounded_relu, is_fixed_point>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                                                                 result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                                 output_stage, window_step_x, window_start_x, window_end_x);
            },
            vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset != 0) && (b_offset == 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<true, false, true, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                                                                mm_result_it, out_it,
                                                                                                                result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                                output_stage, window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<true, false, false, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
                                                                                                                 result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                                 output_stage, window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<false, false, true, is_bounded_relu, is_fixed_point>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                                                                 result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                                 output_stage, window_step_x, window_start_x, window_end_x);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<false, false, false, is_bounded_relu, is_fixed_point>(nullptr, nullptr, nullptr, mm_result_it, out_it,
                                                                                                                  result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
                                                                                                                  output_stage, window_step_x, window_start_x, window_end_x);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

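// Validates data types, quantization bounds and shape compatibility between the GEMM result,
// the reduction vectors, the optional bias and the output.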
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > 255);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < 0 || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN && output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = output->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row, or its number of batches must be 1");
            }
        }
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
{
    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps());

    // Note: This kernel processes 16 elements per iteration, but a left-over loop handles the
    // tail, so no out-of-bounds reads or writes can occur. For this reason
    // num_elems_processed_per_iteration is 1 and update_window_and_padding() can be skipped.
    Coordinates coord;
    coord.set_num_dimensions(output->num_dimensions());
    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));

    return std::make_pair(Status{}, win);
}

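// Returns the function pointer matching the runtime configuration. The map key is a 3-bit
// value built from (reinterpret_as_3d, is_bounded_relu, is_fixed_point), mirroring the
// template parameters of run_offset_contribution_output_stage.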
NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction
get_configured_function(const ITensor *mm_result, const ITensor *vector_sum_row, GEMMLowpOutputStageInfo output_stage)
{
    static std::map<uint8_t, NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction> map_function =
    {
        { 0, &run_offset_contribution_output_stage<false, false, false> },
        { 1, &run_offset_contribution_output_stage<true, false, false> },
        { 2, &run_offset_contribution_output_stage<false, true, false> },
        { 3, &run_offset_contribution_output_stage<true, true, false> },
        { 4, &run_offset_contribution_output_stage<false, false, true> },
        { 5, &run_offset_contribution_output_stage<true, false, true> },
        { 6, &run_offset_contribution_output_stage<false, true, true> },
        { 7, &run_offset_contribution_output_stage<true, true, true> }
    };

    // Check if input is a 3D reinterpretation
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->info()->num_dimensions() > 1
                                   && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();

    // Check if we need to clamp the result using min and max
    const bool is_bounded_relu = ((output_stage.gemmlowp_min_bound != output_stage.gemmlowp_max_bound)
                                  && !(output_stage.gemmlowp_min_bound == 0 && output_stage.gemmlowp_max_bound == 255));

    const bool is_fixed_point = output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;

    // key acts as a bitset, setting the first bit on reinterpret_as_3d,
    // the second on is_bounded_relu, and the third on is_fixed_point.
    uint8_t key = (reinterpret_as_3d ? 1UL : 0UL) | ((is_bounded_relu ? 1UL : 0UL) << 1) | ((is_fixed_point ? 1UL : 0UL) << 2);
    return map_function.find(key)->second;
}
} // namespace

NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
    : _function(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
      _output_stage(GEMMLowpOutputStageInfo())
{
}

void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col,
                                                              const ITensor *vector_sum_row, const ITensor *bias, ITensor *output, int32_t k,
                                                              int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
                                                  bias != nullptr ? bias->info() : nullptr,                     // NOLINT
                                                  output->info(), a_offset, b_offset, output_stage));           // NOLINT

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _bias           = bias;
    _mm_result      = mm_result;
    _output         = output;
    _a_offset       = a_offset;
    _b_offset       = b_offset;
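    // Constant cross-term of the offset contribution: a_offset * b_offset * k, where k is the
    // number of matrix A columns (equivalently, matrix B rows).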
    _k_offset       = a_offset * b_offset * k;
    _output_stage   = output_stage;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        // Check whether vector_sum_col should be slid or not.
        // Don't slide vector_sum_col along the y dimension if it has just one dimension while vector_sum_row has more than one.
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation.
        _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
    }

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    INEKernel::configure(win_config.second);

    _function = get_configured_function(mm_result, vector_sum_row, output_stage);
}

Status NEGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
                                                               const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                                                               int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(), output->clone().get()).first);
    return Status{};
}

void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    _function(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage);
}

} // namespace arm_compute