/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/NEAsymm.h"
#include "arm_compute/core/NEON/wrapper/wrapper.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <map>

namespace arm_compute
{
class Coordinates;

namespace
{
inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
{
    return
    {
        {
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
        }
    };
}

inline int32x4x4_t load(const int32_t *ptr, int32_t x)
{
    return
    {
        {
            vld1q_s32(ptr + x + 0),
            vld1q_s32(ptr + x + 4),
            vld1q_s32(ptr + x + 8),
            vld1q_s32(ptr + x + 12)
        }
    };
}

inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b),
            vaddq_s32(a.val[1], b),
            vaddq_s32(a.val[2], b),
            vaddq_s32(a.val[3], b)
        }
    };
}

inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b.val[0]),
            vaddq_s32(a.val[1], b.val[1]),
            vaddq_s32(a.val[2], b.val[2]),
            vaddq_s32(a.val[3], b.val[3])
        }
    };
}

inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
{
    return
    {
        {
            vmulq_n_s32(a.val[0], mul_scalar),
            vmulq_n_s32(a.val[1], mul_scalar),
            vmulq_n_s32(a.val[2], mul_scalar),
            vmulq_n_s32(a.val[3], mul_scalar)
        }
    };
}

inline int32x4x4_t mul_s32(int32x4x4_t &a, const int32_t *multiplier)
{
    return
    {
        {
            vmulq_s32(a.val[0], vld1q_s32(multiplier)),
            vmulq_s32(a.val[1], vld1q_s32(multiplier + 4)),
            vmulq_s32(a.val[2], vld1q_s32(multiplier + 8)),
            vmulq_s32(a.val[3], vld1q_s32(multiplier + 12))
        }
    };
}

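// Helpers for the two per-element offset-contribution terms: get_a_offset
// computes a_offset * vector_sum_col[x .. x+15] (one term per output column),
// while get_b_offset broadcasts b_offset * vector_sum_row[y] (one term per
// output row) across a whole vector.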
inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
{
    int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);

    a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
    a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
    a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
    a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
    return a_offset_term_s32;
}

inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
{
    int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
    b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, b_offset);
    return b_offset_term_s32;
}

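// The k_offset term is the constant a_offset * b_offset * k contribution of
// the gemmlowp offset expansion (configure() below sets
// _k_offset = a_offset * b_offset * k); it is simply splatted across all lanes.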
inline int32x4x4_t get_k_offset(int32_t k_offset)
{
    return
    {
        {
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset)
        }
    };
}

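// Floating-point (QUANTIZE_DOWN) finalization: the accumulators have already
// been multiplied by the integer multiplier by the caller, so these helpers
// only apply the right shift, saturate to the narrow type and optionally clamp
// to the bounded-ReLU range. Three overloads cover U8 output, S8 output with a
// uniform shift, and S8 output with per-channel shifts.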
template <bool is_bounded_relu>
inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to U8
    uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_u8 = vmaxq_u8(out_u8, min_u8);
        out_u8 = vminq_u8(out_u8, max_u8);
    }

    return out_u8;
}

template <bool is_bounded_relu>
inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

template <bool is_bounded_relu>
inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));
    in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));
    in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));
    in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

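// Maps a scalar element type T (uint8_t / int8_t) to the matching 128-bit
// NEON vector type, so the window code below can be written once for both
// QASYMM8 and QASYMM8_SIGNED outputs.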
template <typename T>
struct VectorTyper
{
    using stype = T;
    using vtype = typename wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128>;
};

inline Window get_win_vector_sum(const Window &window)
{
    Window win_vector_sum(window);
    win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
    return win_vector_sum;
}

inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
{
    Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
    return vector_sum_col_it;
}

inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
{
    Window win_vector_sum_row = get_win_vector_sum(window);
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
    return vector_sum_row_it;
}

inline Iterator get_bias_it(const Window &window, const ITensor *bias)
{
    Window win_bias(window);
    win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
    win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
    Iterator bias_it(bias, win_bias);
    return bias_it;
}

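// Processes one row of the window. Per output element the kernel computes
//
//   out[x] = finalize(mm_result[x] + a_offset * sum_col[x]
//                                  + b_offset * sum_row[y]
//                                  + a_offset * b_offset * k
//                                  + bias[x])
//
// where finalize() is either the fixed-point requantization path
// (QUANTIZE_DOWN_FIXEDPOINT) or the plain multiply-and-shift path
// (QUANTIZE_DOWN). The has_* / is_* template flags compile out the terms that
// are not needed for a given configuration.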
template <typename VT, bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point>
inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                        const int32x4_t result_offset_s32, const int32x4_t result_shift_s32,
                                                        typename VT::vtype min_vec, typename VT::vtype max_vec,
                                                        int32_t a_offset, int32_t b_offset, int32_t k_offset,
                                                        int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                        int window_step_x, int window_start_x, int window_end_x)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
    }
    if(has_a_offset && has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
    }
    if(has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point || has_b_offset)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
        }
        if(!is_fixed_point)
        {
            in_s32 = mul_s32(in_s32, multiplier);
        }

        if(is_fixed_point)
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization<is_bounded_relu>(in_s32, multiplier, shift, result_offset_s32, min_vec, max_vec));
        }
        else
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization_floating_point<is_bounded_relu>(in_s32, result_shift_s32, min_vec, max_vec));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = finalize_quantization<is_bounded_relu>(in_value, multiplier, shift, offset,
                                                                                                               static_cast<typename VT::stype>(min_bound),
                                                                                                               static_cast<typename VT::stype>(max_bound));
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * multiplier) >> shift;

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<typename VT::stype>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = static_cast<typename VT::stype>(std::max<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::lowest()),
                                                                                                                          std::min<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::max()), in_value)));
        }
    }
}

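// Symmetric, per-channel variant (quantized-per-channel weights): each output
// column x has its own result_multipliers[x] / result_shifts[x], and only the
// a_offset contribution applies (vector_sum_row, b_offset and k_offset are
// unused in this path).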
template <bool has_a_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point>
inline void run_offset_contribution_output_stage_window_symm(const int32_t *vector_sum_col_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                             const int32_t *result_multipliers, const int32_t *result_shifts,
                                                             const int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,
                                                             int32_t a_offset, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                             int window_step_x, int window_start_x, int window_end_x)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset);
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
            in_s32 = mul_s32(in_s32, result_multipliers + x);
        }

        if(is_fixed_point)
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_symm<is_bounded_relu>(in_s32, load(result_multipliers, x), load(result_shifts, x), result_offset, min_s8, max_s8));
        }
        else
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_floating_point<is_bounded_relu>(in_s32, load(result_shifts, x), min_s8, max_s8));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *(out_it.ptr() + x) = finalize_quantization<is_bounded_relu>(in_value, result_multipliers[x], result_shifts[x], offset, static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound));
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
        }
    }
}

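// Top-level asymmetric runner: sets up windows and iterators once, then picks
// one of the four (a_offset, b_offset) zero/non-zero combinations so that each
// inner loop only pays for the offset terms it actually needs.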
template <typename T, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point>
void run_offset_contribution_output_stage(const Window &window,
                                          const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                          int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                          GEMMLowpOutputStageInfo output_stage)
{
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
    using Typer        = VectorTyper<T>;

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t multiplier = output_stage.gemmlowp_multiplier;
    const int32_t shift      = output_stage.gemmlowp_shift;
    const int32_t offset     = output_stage.gemmlowp_offset;
    const int32_t min_bound  = output_stage.gemmlowp_min_bound;
    const int32_t max_bound  = output_stage.gemmlowp_max_bound;

    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(is_fixed_point ? shift : -shift);
    const auto      min_vec           = wrapper::vdup_n(static_cast<T>(min_bound), ExactTagType{});
    const auto      max_vec           = wrapper::vdup_n(static_cast<T>(max_bound), ExactTagType{});

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if((a_offset != 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer, true, true, true, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                                                                      mm_result_it,
                                                                                                                      out_it,
                                                                                                                      result_offset_s32, result_shift_s32,
                                                                                                                      min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                      multiplier, shift, offset, min_bound, max_bound,
                                                                                                                      window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer, true, true, false, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                                                                       result_offset_s32, result_shift_s32,
                                                                                                                       min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                       multiplier, shift, offset, min_bound, max_bound,
                                                                                                                       window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset == 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer, false, true, true, is_bounded_relu, is_fixed_point>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                                                                       out_it,
                                                                                                                       result_offset_s32, result_shift_s32,
                                                                                                                       min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                       multiplier, shift, offset, min_bound, max_bound,
                                                                                                                       window_step_x, window_start_x, window_end_x);
            },
            vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer, false, true, false, is_bounded_relu, is_fixed_point>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                                                                        result_offset_s32, result_shift_s32,
                                                                                                                        min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                        multiplier, shift, offset, min_bound, max_bound,
                                                                                                                        window_step_x, window_start_x, window_end_x);
            },
            vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset != 0) && (b_offset == 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer, true, false, true, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                                                                       out_it,
                                                                                                                       result_offset_s32, result_shift_s32,
                                                                                                                       min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                       multiplier, shift, offset, min_bound, max_bound,
                                                                                                                       window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer, true, false, false, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
                                                                                                                        result_offset_s32, result_shift_s32,
                                                                                                                        min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                        multiplier, shift, offset, min_bound, max_bound,
                                                                                                                        window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer, false, false, true, is_bounded_relu, is_fixed_point>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                                                                        result_offset_s32, result_shift_s32,
                                                                                                                        min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                        multiplier, shift, offset, min_bound, max_bound,
                                                                                                                        window_step_x, window_start_x, window_end_x);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer, false, false, false, is_bounded_relu, is_fixed_point>(nullptr, nullptr, nullptr, mm_result_it, out_it,
                                                                                                                         result_offset_s32, result_shift_s32,
                                                                                                                         min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                                                                         multiplier, shift, offset, min_bound, max_bound,
                                                                                                                         window_step_x, window_start_x, window_end_x);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

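// Symmetric (per-channel) runner: only vector_sum_col / a_offset matter here,
// so the dispatch reduces to two cases (a_offset zero or non-zero), each with
// and without a bias tensor.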
template <bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point>
void run_offset_contribution_output_stage_symm(const Window &window,
                                               const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                               int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                               GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);

    const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t offset    = output_stage.gemmlowp_offset;
    const int32_t min_bound = output_stage.gemmlowp_min_bound;
    const int32_t max_bound = output_stage.gemmlowp_max_bound;

    const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
    const int32_t *result_shifts      = output_stage.gemmlowp_shifts.data();
    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int8x16_t min_s8            = vdupq_n_s8(static_cast<int8_t>(min_bound));
    const int8x16_t max_s8            = vdupq_n_s8(static_cast<int8_t>(max_bound));

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if(a_offset != 0)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm<true, true, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                                                              result_multipliers, result_shifts,
                                                                                                              result_offset_s32, min_s8, max_s8,
                                                                                                              a_offset, offset, min_bound, max_bound,
                                                                                                              window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm<true, false, is_bounded_relu, is_fixed_point>(vector_sum_col_ptr, nullptr, mm_result_it, out_it,
                                                                                                               result_multipliers, result_shifts,
                                                                                                               result_offset_s32, min_s8, max_s8,
                                                                                                               a_offset, offset, min_bound, max_bound,
                                                                                                               window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm<false, true, is_bounded_relu, is_fixed_point>(nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                                                               result_multipliers, result_shifts,
                                                                                                               result_offset_s32, min_s8, max_s8,
                                                                                                               a_offset, offset, min_bound, max_bound,
                                                                                                               window_step_x, window_start_x, window_end_x);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm<false, false, is_bounded_relu, is_fixed_point>(nullptr, nullptr, mm_result_it, out_it,
                                                                                                                result_multipliers, result_shifts,
                                                                                                                result_offset_s32, min_s8, max_s8,
                                                                                                                a_offset, offset, min_bound, max_bound,
                                                                                                                window_step_x, window_start_x, window_end_x);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
    if(output->data_type() == DataType::QASYMM8)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > 255);
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < 0);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > 127);
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < -128);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) > 1 && output_stage.gemmlowp_multipliers.size() > 1 && b_offset != 0);
    }
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN && output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = output->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row, or its number of batches must be set to 1");
            }
        }
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
{
    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps());

    // Note: This kernel performs 16 elements per iteration.
    // However, since we use a left-over for loop, we cannot have any read or write out of memory.
    // For this reason num_elems_processed_per_iteration is 1 and update_window_and_padding() can be skipped.
    Coordinates coord;
    coord.set_num_dimensions(output->num_dimensions());
    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));

    return std::make_pair(Status{}, win);
}

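// Selects the specialized run function at configure time. The dispatch key is
// a small bitset: bit 0 = reinterpret_as_3d, bit 1 = is_bounded_relu,
// bit 2 = is_fixed_point and, for the asymmetric table only, bit 3 = signed
// output. For example, a fixed-point, bounded-ReLU, QASYMM8_SIGNED kernel with
// no 3D reinterpretation uses key 0b1110 = 14.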
NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction
get_configured_function(const ITensor *mm_result, const ITensor *vector_sum_row, const ITensor *output, GEMMLowpOutputStageInfo output_stage)
{
    static std::map<uint8_t, NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction> map_function_qasymm =
    {
        { 0, &run_offset_contribution_output_stage<uint8_t, false, false, false> },
        { 1, &run_offset_contribution_output_stage<uint8_t, true, false, false> },
        { 2, &run_offset_contribution_output_stage<uint8_t, false, true, false> },
        { 3, &run_offset_contribution_output_stage<uint8_t, true, true, false> },
        { 4, &run_offset_contribution_output_stage<uint8_t, false, false, true> },
        { 5, &run_offset_contribution_output_stage<uint8_t, true, false, true> },
        { 6, &run_offset_contribution_output_stage<uint8_t, false, true, true> },
        { 7, &run_offset_contribution_output_stage<uint8_t, true, true, true> },
        { 8, &run_offset_contribution_output_stage<int8_t, false, false, false> },
        { 9, &run_offset_contribution_output_stage<int8_t, true, false, false> },
        { 10, &run_offset_contribution_output_stage<int8_t, false, true, false> },
        { 11, &run_offset_contribution_output_stage<int8_t, true, true, false> },
        { 12, &run_offset_contribution_output_stage<int8_t, false, false, true> },
        { 13, &run_offset_contribution_output_stage<int8_t, true, false, true> },
        { 14, &run_offset_contribution_output_stage<int8_t, false, true, true> },
        { 15, &run_offset_contribution_output_stage<int8_t, true, true, true> },
    };

    static std::map<uint8_t, NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction> map_function_qsymm =
    {
        { 0, &run_offset_contribution_output_stage_symm<false, false, false> },
        { 1, &run_offset_contribution_output_stage_symm<true, false, false> },
        { 2, &run_offset_contribution_output_stage_symm<false, true, false> },
        { 3, &run_offset_contribution_output_stage_symm<true, true, false> },
        { 4, &run_offset_contribution_output_stage_symm<false, false, true> },
        { 5, &run_offset_contribution_output_stage_symm<true, false, true> },
        { 6, &run_offset_contribution_output_stage_symm<false, true, true> },
        { 7, &run_offset_contribution_output_stage_symm<true, true, true> }
    };

    // Check if input is a 3D reinterpretation
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->info()->num_dimensions() > 1
                                   && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();

    // Check if we need to clamp the result using min and max
    const bool is_bounded_relu = ((output_stage.gemmlowp_min_bound != output_stage.gemmlowp_max_bound)
                                  && !(output_stage.gemmlowp_min_bound == 0 && output_stage.gemmlowp_max_bound == 255));

    // Check if we need to perform fixed point requantization
    const bool is_fixed_point = output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;

    // Check if the output is signed
    const bool is_signed = output->info()->data_type() == DataType::QASYMM8_SIGNED;

    // Check if symmetric per-channel execution
    const bool is_symm = output_stage.is_quantized_per_channel;

    // key acts as a bitset, setting the first bit on reinterpret_as_3d,
    // the second on is_bounded_relu, the third on is_fixed_point and,
    // for the asymmetric table, the fourth on is_signed.
    uint8_t key = (reinterpret_as_3d ? 1UL : 0UL) | ((is_bounded_relu ? 1UL : 0UL) << 1) | ((is_fixed_point ? 1UL : 0UL) << 2);
    if(is_symm)
    {
        return map_function_qsymm.find(key)->second;
    }
    else
    {
        key |= ((is_signed ? 1UL : 0UL) << 3);
        return map_function_qasymm.find(key)->second;
    }
}
} // namespace

NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
    : _function(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
      _output_stage(GEMMLowpOutputStageInfo())
{
}

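// Illustrative usage sketch (hypothetical tensor names, not part of this
// file's API surface): a caller typically validates, configures once, then
// lets the scheduler invoke run() per window:
//
//   NEGEMMLowpOffsetContributionOutputStageKernel kernel;
//   ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpOffsetContributionOutputStageKernel::validate(
//       mm_result.info(), vector_sum_col.info(), vector_sum_row.info(),
//       bias.info(), output.info(), a_offset, b_offset, output_stage));
//   kernel.configure(&mm_result, &vector_sum_col, &vector_sum_row, &bias,
//                    &output, k, a_offset, b_offset, output_stage);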
void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col,
                                                              const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset,
                                                              GEMMLowpOutputStageInfo output_stage)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
                                                  bias != nullptr ? bias->info() : nullptr,                     // NOLINT
                                                  output->info(), a_offset, b_offset, output_stage));           // NOLINT

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _bias           = bias;
    _mm_result      = mm_result;
    _output         = output;
    _a_offset       = a_offset;
    _b_offset       = b_offset;
    _k_offset       = a_offset * b_offset * k;
    _output_stage   = output_stage;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        // Check if vector_sum_col should be slid or not.
        // Don't slide vector_sum_col along the y dimension if vector_sum_col has just 1 dimension and vector_sum_row has more than 1.
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation.
        _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
    }

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    INEKernel::configure(win_config.second);

    _function = get_configured_function(mm_result, vector_sum_row, output, output_stage);
}

Status NEGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
                                                               const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                                                               int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(), output->clone().get()).first);
    return Status{};
}

void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    _function(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage);
}

} // namespace arm_compute