/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <map>

namespace arm_compute
{
namespace
{
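// Loads 16 consecutive S32 accumulators from the matrix multiply result, starting at column x.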
inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
{
    return
    {
        {
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
        }
    };
}

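// Loads 16 consecutive S32 values starting at element x of a raw buffer (bias, vector sums, per-channel multipliers or shifts).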
inline int32x4x4_t load(const int32_t *ptr, int32_t x)
{
    return
    {
        {
            vld1q_s32(ptr + x + 0),
            vld1q_s32(ptr + x + 4),
            vld1q_s32(ptr + x + 8),
            vld1q_s32(ptr + x + 12)
        }
    };
}

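// Adds the same S32 vector b to each of the four quads of a.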
inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b),
            vaddq_s32(a.val[1], b),
            vaddq_s32(a.val[2], b),
            vaddq_s32(a.val[3], b)
        }
    };
}

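// Element-wise addition of two 16-lane S32 blocks.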
inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b.val[0]),
            vaddq_s32(a.val[1], b.val[1]),
            vaddq_s32(a.val[2], b.val[2]),
            vaddq_s32(a.val[3], b.val[3])
        }
    };
}

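// Multiplies all 16 lanes of a by a single scalar multiplier (used by the non fixed-point requantization path).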
inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
{
    return
    {
        {
            vmulq_n_s32(a.val[0], mul_scalar),
            vmulq_n_s32(a.val[1], mul_scalar),
            vmulq_n_s32(a.val[2], mul_scalar),
            vmulq_n_s32(a.val[3], mul_scalar)
        }
    };
}

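// Multiplies the 16 lanes of a by per-channel multipliers loaded from memory.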
inline int32x4x4_t mul_s32(int32x4x4_t &a, const int32_t *multiplier)
{
    return
    {
        {
            vmulq_s32(a.val[0], vld1q_s32(multiplier)),
            vmulq_s32(a.val[1], vld1q_s32(multiplier + 4)),
            vmulq_s32(a.val[2], vld1q_s32(multiplier + 8)),
            vmulq_s32(a.val[3], vld1q_s32(multiplier + 12))
        }
    };
}

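// Computes the per-output-column offset term: a_offset * vector_sum_col[x .. x + 15].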
inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
{
    int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);

    a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
    a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
    a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
    a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
    return a_offset_term_s32;
}

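// Computes the per-output-row offset term b_offset * vector_sum_row[y], broadcast to all four lanes.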
inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
{
    int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
    b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, b_offset);
    return b_offset_term_s32;
}

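// Broadcasts the constant cross-term k_offset (a_offset * b_offset * k, precomputed in configure()) across 16 lanes.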
inline int32x4x4_t get_k_offset(int32_t k_offset)
{
    return
    {
        {
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset)
        }
    };
}

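// Requantizes 16 S32 accumulators for the QUANTIZE_DOWN (non fixed-point) output stage:
// shifts, clamps negatives to zero, saturates down to U8 and optionally applies the bounded ReLU.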
inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to U8
    uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_u8 = vmaxq_u8(out_u8, min_u8);
        out_u8 = vminq_u8(out_u8, max_u8);
    }

    return out_u8;
}

inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));
    in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));
    in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));
    in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

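// Maps a scalar output type (int8_t / uint8_t) to the matching 128-bit Neon vector type.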
template <typename T>
struct VectorTyper
{
    using stype = T;
    using vtype = typename wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128>;
};

inline Window get_win_vector_sum(const Window &window)
{
    Window win_vector_sum(window);
    win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
    return win_vector_sum;
}

inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
{
    Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
    return vector_sum_col_it;
}

inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
{
    Window win_vector_sum_row = get_win_vector_sum(window);
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
    return vector_sum_row_it;
}

inline Iterator get_bias_it(const Window &window, const ITensor *bias)
{
    Window win_bias(window);
    win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
    win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
    Iterator bias_it(bias, win_bias);
    return bias_it;
}

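// Processes one row of the window: accumulates the offset contributions
//   mm_result[x] + a_offset * vector_sum_col[x] + b_offset * vector_sum_row[y] + k_offset + bias[x]
// (each term only when the corresponding has_* flag is set), then applies the output stage:
// fixed-point requantization via finalize_quantization(), or the simple multiply-and-shift
// path otherwise. Works on 16 elements per iteration with a scalar loop for the leftovers.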
template <typename VT>
inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                        const int32x4_t result_offset_s32, const int32x4_t result_shift_s32,
                                                        typename VT::vtype min_vec, typename VT::vtype max_vec,
                                                        int32_t a_offset, int32_t b_offset, int32_t k_offset,
                                                        int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                        int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
    }
    if(has_a_offset && has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
    }
    if(has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point || has_b_offset)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
        }
        if(!is_fixed_point)
        {
            in_s32 = mul_s32(in_s32, multiplier);
        }

        if(is_fixed_point)
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization(in_s32, multiplier, shift, result_offset_s32, min_vec, max_vec, is_bounded_relu));
        }
        else
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization_floating_point(in_s32, result_shift_s32, min_vec, max_vec, is_bounded_relu));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = finalize_quantization(in_value, multiplier, shift, offset,
                                                                                              static_cast<typename VT::stype>(min_bound),
                                                                                              static_cast<typename VT::stype>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * multiplier) >> shift;

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<typename VT::stype>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = static_cast<typename VT::stype>(std::max<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::lowest()),
                                                                                                                          std::min<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::max()), in_value)));
        }
    }
}

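// Symmetric per-channel variant of the above: only the A offset contribution applies and
// every output channel has its own result multiplier and shift.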
inline void run_offset_contribution_output_stage_window_symm(const int32_t *vector_sum_col_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                             const int32_t *result_multipliers, const int32_t *result_shifts,
                                                             const int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,
                                                             int32_t a_offset, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                             int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset);
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
            in_s32 = mul_s32(in_s32, result_multipliers + x);
        }

        if(is_fixed_point)
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_symm(in_s32, load(result_multipliers, x), load(result_shifts, x), result_offset, min_s8, max_s8, is_bounded_relu));
        }
        else
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_floating_point(in_s32, load(result_shifts, x), min_s8, max_s8, is_bounded_relu));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *(out_it.ptr() + x) = finalize_quantization(in_value, result_multipliers[x], result_shifts[x], offset, static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
        }
    }
}

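// Walks the (collapsed) execution window and dispatches to the inner loop specialized for
// the present combination of A offset, B offset and bias, so those checks are not repeated
// per element.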
template <typename T>
void run_offset_contribution_output_stage(const Window &window,
                                          const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                          int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                          GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
    using Typer        = VectorTyper<T>;

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t multiplier = output_stage.gemmlowp_multiplier;
    const int32_t shift      = output_stage.gemmlowp_shift;
    const int32_t offset     = output_stage.gemmlowp_offset;
    const int32_t min_bound  = output_stage.gemmlowp_min_bound;
    const int32_t max_bound  = output_stage.gemmlowp_max_bound;

    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(is_fixed_point ? shift : -shift);
    const auto      min_vec           = wrapper::vdup_n(static_cast<T>(min_bound), ExactTagType{});
    const auto      max_vec           = wrapper::vdup_n(static_cast<T>(max_bound), ExactTagType{});

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if((a_offset != 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                   mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset == 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset != 0) && (b_offset == 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, false, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, false, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

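// Per-channel (symmetric) dispatch: vector_sum_row, b_offset and k_offset are unused here,
// so only the A offset and bias combinations are specialized.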
void run_offset_contribution_output_stage_symm(const Window &window,
                                               const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                               int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                               GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);

    const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t offset    = output_stage.gemmlowp_offset;
    const int32_t min_bound = output_stage.gemmlowp_min_bound;
    const int32_t max_bound = output_stage.gemmlowp_max_bound;

    const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
    const int32_t *result_shifts      = output_stage.gemmlowp_shifts.data();
    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int8x16_t min_s8            = vdupq_n_s8(static_cast<int8_t>(min_bound));
    const int8x16_t max_s8            = vdupq_n_s8(static_cast<int8_t>(max_bound));

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if(a_offset != 0)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
    if(output->data_type() != DataType::QASYMM8)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) > 1 && output_stage.gemmlowp_multipliers.size() > 1 && b_offset != 0);
    }
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN && output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = output->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row, or its number of batches must be set to 1");
            }
        }
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
{
    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps());

    // Note: This kernel processes 16 elements per iteration.
    // However, since we use a left-over for loop, we cannot have any read or write out of memory.
    // For this reason num_elems_processed_per_iteration is 1 and update_window_and_padding() can be skipped.

    return std::make_pair(Status{}, win);
}
} // namespace

NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
    : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
      _output_stage(GEMMLowpOutputStageInfo())
{
}

void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col,
                                                              const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset,
                                                              GEMMLowpOutputStageInfo output_stage)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
                                                  bias != nullptr ? bias->info() : nullptr,                     // NOLINT
                                                  output->info(), a_offset, b_offset, output_stage));           // NOLINT

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _bias           = bias;
    _mm_result      = mm_result;
    _output         = output;
    _a_offset       = a_offset;
    _b_offset       = b_offset;
    _k_offset       = a_offset * b_offset * k;
    _output_stage   = output_stage;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        // Check if vector_sum_col_shape should be slid or not
        // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation
        _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
    }

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    INEKernel::configure(win_config.second);
}

Status NEGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
                                                               const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                                                               int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(), output->clone().get()).first);
    return Status{};
}

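// Select the execution path: symmetric per-channel quantization uses the dedicated _symm
// path; otherwise the templated path is instantiated for int8_t (QASYMM8_SIGNED) or
// uint8_t (QASYMM8) outputs.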
void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_min_max(_output->info()->data_type());
    int32_t type_min_int = type_min.get<int32_t>();
    int32_t type_max_int = type_max.get<int32_t>();

    const bool reinterpret_as_3d = _vector_sum_row != nullptr
                                   && _mm_result->info()->num_dimensions() > 1
                                   && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();

    const bool is_bounded_relu = !(_output_stage.gemmlowp_min_bound <= type_min_int && _output_stage.gemmlowp_max_bound >= type_max_int);

    // Check if we need to perform fixed point requantization
    const bool is_fixed_point = _output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;

    // Check if the output is signed
    const bool is_signed = _output->info()->data_type() == DataType::QASYMM8_SIGNED;

    // Check if symmetric per-channel execution
    const bool is_symm = _output_stage.is_quantized_per_channel;

    if(is_symm)
    {
        run_offset_contribution_output_stage_symm(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
    }
    else
    {
        if(is_signed)
        {
            run_offset_contribution_output_stage<int8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                         reinterpret_as_3d, is_bounded_relu, is_fixed_point);
        }
        else
        {
            run_offset_contribution_output_stage<uint8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                          reinterpret_as_3d, is_bounded_relu, is_fixed_point);
        }
    }
}

} // namespace arm_compute