/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <map>
#include <tuple>

namespace arm_compute
{
class Coordinates;

namespace
{
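// Load 16 consecutive S32 GEMM results, starting at element offset x from the iterator's current position.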
inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
{
    return
    {
        {
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
        }
    };
}

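// Load 16 consecutive S32 values, starting at element offset x from a raw pointer.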
inline int32x4x4_t load(const int32_t *ptr, int32_t x)
{
    return
    {
        {
            vld1q_s32(ptr + x + 0),
            vld1q_s32(ptr + x + 4),
            vld1q_s32(ptr + x + 8),
            vld1q_s32(ptr + x + 12)
        }
    };
}

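// Add the S32 vector b to each of the four S32 vectors in a; the overload below adds two
// int32x4x4_t values lane by lane.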
inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b),
            vaddq_s32(a.val[1], b),
            vaddq_s32(a.val[2], b),
            vaddq_s32(a.val[3], b)
        }
    };
}

inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b.val[0]),
            vaddq_s32(a.val[1], b.val[1]),
            vaddq_s32(a.val[2], b.val[2]),
            vaddq_s32(a.val[3], b.val[3])
        }
    };
}

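// Multiply each of the four S32 vectors in a by a scalar, or, in the overload below, by 16
// per-channel multipliers loaded from memory.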
inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
{
    return
    {
        {
            vmulq_n_s32(a.val[0], mul_scalar),
            vmulq_n_s32(a.val[1], mul_scalar),
            vmulq_n_s32(a.val[2], mul_scalar),
            vmulq_n_s32(a.val[3], mul_scalar)
        }
    };
}

inline int32x4x4_t mul_s32(int32x4x4_t &a, const int32_t *multiplier)
{
    return
    {
        {
            vmulq_s32(a.val[0], vld1q_s32(multiplier)),
            vmulq_s32(a.val[1], vld1q_s32(multiplier + 4)),
            vmulq_s32(a.val[2], vld1q_s32(multiplier + 8)),
            vmulq_s32(a.val[3], vld1q_s32(multiplier + 12))
        }
    };
}

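// Compute the LHS offset contribution for 16 consecutive columns: a_offset * vector_sum_col[x .. x + 15].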
inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
{
    int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);

    a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
    a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
    a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
    a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
    return a_offset_term_s32;
}

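// Compute the RHS offset contribution for the current row, b_offset * vector_sum_row, broadcast to a vector.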
inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
{
    int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
    b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, b_offset);
    return b_offset_term_s32;
}

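// Broadcast the constant term k_offset = a_offset * b_offset * k across four S32 vectors.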
inline int32x4x4_t get_k_offset(int32_t k_offset)
{
    return
    {
        {
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset)
        }
    };
}

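// Finalize 16 S32 accumulators that have already been scaled by the output multiplier: apply the
// result shift, clamp negative values to zero, narrow with saturation and optionally apply a
// bounded ReLU. The three overloads below produce U8 output, S8 output with a uniform shift and
// S8 output with per-channel shifts, respectively.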
inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to U8
    uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_u8 = vmaxq_u8(out_u8, min_u8);
        out_u8 = vminq_u8(out_u8, max_u8);
    }

    return out_u8;
}

inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));
    in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));
    in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));
    in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

template <typename T>
struct VectorTyper
{
    using stype = T;
    using vtype = typename wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128>;
};

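// Helpers to build the windows and iterators used to traverse the column-sum, row-sum and bias
// tensors alongside the GEMM result.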
inline Window get_win_vector_sum(const Window &window)
{
    Window win_vector_sum(window);
    win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
    return win_vector_sum;
}

inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
{
    Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
    return vector_sum_col_it;
}

inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
{
    Window win_vector_sum_row = get_win_vector_sum(window);
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
    return vector_sum_row_it;
}

inline Iterator get_bias_it(const Window &window, const ITensor *bias)
{
    Window win_bias(window);
    win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
    win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
    Iterator bias_it(bias, win_bias);
    return bias_it;
}

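/** Run the offset contribution and output stage on one window row for uniform quantization: add
 * the LHS/RHS offset terms and the bias to the S32 GEMM result, then requantize to the output
 * type, either with fixed-point arithmetic (QUANTIZE_DOWN_FIXEDPOINT) or with an integer
 * multiply-and-shift (QUANTIZE_DOWN). The main loop is vectorized over 16 elements and a scalar
 * loop handles the left-over elements.
 */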
template <typename VT>
inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                        const int32x4_t result_offset_s32, const int32x4_t result_shift_s32,
                                                        typename VT::vtype min_vec, typename VT::vtype max_vec,
                                                        int32_t a_offset, int32_t b_offset, int32_t k_offset,
                                                        int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                        int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
    }
    if(has_a_offset && has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
    }
    if(has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point || has_b_offset)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
        }
        if(!is_fixed_point)
        {
            in_s32 = mul_s32(in_s32, multiplier);
        }

        if(is_fixed_point)
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization(in_s32, multiplier, shift, result_offset_s32, min_vec, max_vec, is_bounded_relu));
        }
        else
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization_floating_point(in_s32, result_shift_s32, min_vec, max_vec, is_bounded_relu));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = finalize_quantization(in_value, multiplier, shift, offset,
                                                                                              static_cast<typename VT::stype>(min_bound),
                                                                                              static_cast<typename VT::stype>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * multiplier) >> shift;

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<typename VT::stype>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = static_cast<typename VT::stype>(std::max<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::lowest()),
                                                                                                                          std::min<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::max()), in_value)));
        }
    }
}

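/** Variant of the above for symmetric per-channel quantization to a signed 8-bit output: each
 * output column has its own result multiplier and shift, and no b_offset contribution is applied.
 */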
inline void run_offset_contribution_output_stage_window_symm(const int32_t *vector_sum_col_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                             const int32_t *result_multipliers, const int32_t *result_shifts,
                                                             const int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,
                                                             int32_t a_offset, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                             int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset);
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
            in_s32 = mul_s32(in_s32, result_multipliers + x);
        }

        if(is_fixed_point)
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_symm(in_s32, load(result_multipliers, x), load(result_shifts, x), result_offset, min_s8, max_s8, is_bounded_relu));
        }
        else
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_floating_point(in_s32, load(result_shifts, x), min_s8, max_s8, is_bounded_relu));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *(out_it.ptr() + x) = finalize_quantization(in_value, result_multipliers[x], result_shifts[x], offset, static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
        }
    }
}

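/** Dispatch the offset contribution and output stage over the whole execution window for uniform
 * quantization. The combination of column-sum, row-sum and bias iterators is selected once, based
 * on which quantization offsets are non-zero, so the per-window function is invoked with the
 * matching has_a_offset/has_b_offset/has_bias flags.
 */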
template <typename T>
void run_offset_contribution_output_stage(const Window &window,
                                          const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                          int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                          GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
    using Typer        = VectorTyper<T>;

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t multiplier = output_stage.gemmlowp_multiplier;
    const int32_t shift      = output_stage.gemmlowp_shift;
    const int32_t offset     = output_stage.gemmlowp_offset;
    const int32_t min_bound  = output_stage.gemmlowp_min_bound;
    const int32_t max_bound  = output_stage.gemmlowp_max_bound;

    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(is_fixed_point ? shift : -shift);
    const auto      min_vec           = wrapper::vdup_n(static_cast<T>(min_bound), ExactTagType{});
    const auto      max_vec           = wrapper::vdup_n(static_cast<T>(max_bound), ExactTagType{});

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if((a_offset != 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                   mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset == 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset != 0) && (b_offset == 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, false, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, false, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

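// Dispatcher for the symmetric per-channel case; only the LHS (a_offset) contribution and the
// bias are relevant here, since symmetric weights have a zero offset.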
void run_offset_contribution_output_stage_symm(const Window &window,
                                               const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                               int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                               GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);

    const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t offset    = output_stage.gemmlowp_offset;
    const int32_t min_bound = output_stage.gemmlowp_min_bound;
    const int32_t max_bound = output_stage.gemmlowp_max_bound;

    const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
    const int32_t *result_shifts      = output_stage.gemmlowp_shifts.data();
    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int8x16_t min_s8            = vdupq_n_s8(static_cast<int8_t>(min_bound));
    const int8x16_t max_s8            = vdupq_n_s8(static_cast<int8_t>(max_bound));

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if(a_offset != 0)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
    if(output->data_type() != DataType::QASYMM8)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) > 1 && output_stage.gemmlowp_multipliers.size() > 1 && b_offset != 0);
    }
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN && output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = output->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row, or the number of batches must be set to 1");
            }
        }
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
{
    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps());

    // Note: This kernel performs 16 elements per iteration.
    // However, since we use a left-over for loop, we cannot have any read or write out of memory
    // For this reason num_elems_processed_per_iteration is 1 and so update_window_and_padding() can be skipped
    Coordinates coord;
    coord.set_num_dimensions(output->num_dimensions());
    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));

    return std::make_pair(Status{}, win);
}
} // namespace

NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
    : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
      _output_stage(GEMMLowpOutputStageInfo())
{
}

void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col,
                                                              const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset,
                                                              GEMMLowpOutputStageInfo output_stage)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
                                                  bias != nullptr ? bias->info() : nullptr,                     // NOLINT
                                                  output->info(), a_offset, b_offset, output_stage));           // NOLINT

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _bias           = bias;
    _mm_result      = mm_result;
    _output         = output;
    _a_offset       = a_offset;
    _b_offset       = b_offset;
    _k_offset       = a_offset * b_offset * k;
    _output_stage   = output_stage;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        // Check if vector_sum_col_shape should be slid or not
        // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation
        _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
    }

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    INEKernel::configure(win_config.second);
}

Status NEGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
                                                               const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                                                               int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(), output->clone().get()).first);
    return Status{};
}

void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_min_max(_output->info()->data_type());
    int32_t type_min_int = type_min.get<int32_t>();
    int32_t type_max_int = type_max.get<int32_t>();

    const bool reinterpret_as_3d = _vector_sum_row != nullptr
                                   && _mm_result->info()->num_dimensions() > 1
                                   && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();

    // Clamping is only needed if the requested bounds are tighter than the representable range of the output type
    const bool is_bounded_relu = !(_output_stage.gemmlowp_min_bound <= type_min_int && _output_stage.gemmlowp_max_bound >= type_max_int);

    // Check if we need to perform fixed point requantization
    const bool is_fixed_point = _output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;

    // Check if the output is signed
    const bool is_signed = _output->info()->data_type() == DataType::QASYMM8_SIGNED;

    // Check if symmetric per-channel execution
    const bool is_symm = _output_stage.is_quantized_per_channel;

    if(is_symm)
    {
        run_offset_contribution_output_stage_symm(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
    }
    else
    {
        if(is_signed)
        {
            run_offset_contribution_output_stage<int8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                         reinterpret_as_3d, is_bounded_relu, is_fixed_point);
        }
        else
        {
            run_offset_contribution_output_stage<uint8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                          reinterpret_as_3d, is_bounded_relu, is_fixed_point);
        }
    }
}

} // namespace arm_compute