/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <alloca.h>

#include <algorithm>
#include <cassert>

#include "arm_gemm.hpp"
#include "bias_adder.hpp"
#include "convolver.hpp"
#include "ndrange.hpp"
#include "performance_parameters.hpp"
#include "transform.hpp"
#include "utils.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

#ifndef UNUSED
#define __I_DEFINED_UNUSED
#define UNUSED(x) ((void)(x))
#endif

namespace arm_gemm {

namespace {

// We need to invoke the kernel differently for quantizing and non-quantizing cases, so here is a shim class to do
// that.

template<typename OutputStage, bool SeparateQuantize = false>
class run_hybrid_kernel {
public:
    template<typename strategy, typename Tlo, typename Tro, typename Tr>
    static inline void run (
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
        const OutputStage &os, const int32_t *col_bias, unsigned int n_0 );
};

template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
inline void run_hybrid_kernel<Nothing, false>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
        const Nothing &, const int32_t *, unsigned int) {
#ifdef CYCLE_PROFILING
    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
#endif
    UNUSED(kern_k);

    /* Indirect hybrid kernels read the full width of the bias. So we need to detect the case where we are writing
     * a partial block and pad the bias for that block. */
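    /* Illustrative example (values assumed, not from the source): with strategy::out_width() == 16 and N == 100,
     * N_bulk == 96 columns are handled with the bias as supplied, and the final N_remainder == 4 bias values are
     * copied into a stack buffer sized to a full out_width() so the tail kernel call can safely read a whole block. */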
    if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
        /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
        unsigned int N_remainder = N % strategy::out_width();
        unsigned int N_bulk = N - N_remainder;

        /* Output argument to be used for the tail */
        IndirectOutputArg<Tr> offset_output = output_arg;

        /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
        if (N_bulk > 0) {
            strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, output_arg, bias_ptr, act, accumulate);

            if (output_arg.is_indirect) {
                offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
            } else {
                offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
            }
        }

        /* Pad the bias buffer for the remainder */
        Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
        memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));

        /* Process the remainder, offsetting the B pointer as needed. */
        strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder, b_ptr + (N_bulk * kern_k), offset_output, bias_pad_buffer, act, accumulate);
    } else {
        strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, bias_ptr, act, accumulate);
    }
}

template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
inline void run_hybrid_kernel<Requantize32, false>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
        const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
#ifdef CYCLE_PROFILING
    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
#endif
    UNUSED(kern_k);

    strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, &os, col_bias + n_0, n_0);
}

template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
inline void run_hybrid_kernel<Requantize32, true>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
        const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
    UNUSED(kern_k);
    // On this route we will only process one kernel height at a time and will make sure this happens in the driver loop.
    assert(M <= strategy::out_height());
    // We don't yet support indirect output (as the quantizer can't do it).
    assert(output_arg.is_indirect == false);
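    // Overall flow of this path: run the GEMM into a temporary int32 result buffer, compute per-row sums of A
    // (only needed when b_offset != 0), then requantize that buffer into the caller's output.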

    // We need a row sum buffer and intermediate output buffer.
    // These go on the stack as they are not too large, using an automatic array and alloca() respectively.
    int32_t row_sums[strategy::out_height()];
    typename strategy::result_type *result_buffer;

    unsigned int output_width = roundup(N, strategy::out_width());

    result_buffer = reinterpret_cast<typename strategy::result_type *>(alloca(output_width * strategy::out_height() * sizeof(typename strategy::result_type)));

    {
#ifdef CYCLE_PROFILING
        auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
#endif
        // Perform the GEMM, into the output buffer.
        strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, IndirectOutputArg<typename strategy::result_type>(result_buffer, output_width), nullptr, Activation(), false);
    }

    if (os.b_offset != 0) {
#ifdef CYCLE_PROFILING
        auto p = prof.ScopedProfiler(PROFILE_ROWSUMS, (unsigned long)M * kern_k);
#endif
        row_sums_indirect(num_strings, string_ptr, A_arg, M, row_sums, &os);
    } else {
        memset(row_sums, 0, sizeof(int32_t) * strategy::out_height());
    }

    {
#ifdef CYCLE_PROFILING
        auto p = prof.ScopedProfiler(PROFILE_QUANTIZE, (unsigned long)M * N);
#endif
        // Quantize
        requantize_block_32(os, N, M, result_buffer, output_width, output_arg.direct.base, output_arg.direct.stride, row_sums, col_bias + n_0, n_0);
    }
}

} // anonymous namespace

// Implementation of the GemmCommon abstract class.
template<typename strategy, typename To, typename Tr, typename OutputStage = Nothing, bool SeparateQuantize = false>
class GemmHybridIndirect : public GemmCommon<To, Tr> {
    typedef typename strategy::lhs_operand_type Tloi;
    typedef typename strategy::rhs_operand_type Troi;
    typedef typename strategy::result_type Tri;

    GemmArgs _args;
    OutputStage _os = {};

    /* Quantized support (in addition to 'output stage' above) */
    int32_t *_col_bias = nullptr;

    const unsigned int _Ktotal;
    const unsigned int _rounded_Ksize;

    /* Blocking info */
    const unsigned int _k_block;
    const unsigned int _n_block;
    const unsigned int _Mround;

    /* Pretransposed buffer. */
    const Troi *_B_transposed=nullptr;

    /* Indirect parameters. _indirect_buf doubles as a flag to indicate that the "indirect" transform should be used. */
    const To * const * const * _indirect_buf = nullptr;

    /* Convolver - only set up for convolution problems, so also doubles as a flag. */
    std::unique_ptr<convolver<To>> _convolver = nullptr;

    // Array of pointers to output rows
//    Tr * const * _output_ptrs;

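    // 4D scheduling window: (row blocks of out_height()) x (batches) x (N blocks of _n_block) x (multis).
    // execute() below unpacks the coordinates in this order.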
    const NDRange<4> _window_range;

    unsigned int get_col_sum_size() const {
        if (std::is_same<OutputStage, Requantize32>::value) {
            return _args._Nsize * _args._nmulti * sizeof(int32_t);
        } else {
            return 0;
        }
    }

    static unsigned int get_ktotal(const GemmArgs &args) {
        return args._Ksections * roundup(args._Ksize, strategy::k_unroll());
    }

    static unsigned int compute_k_block(const GemmArgs &args) {
        // Some kernels don't support accumulate mode - these can't do K blocking at all.
        if (!strategy::supports_accumulate() || std::is_same<OutputStage, Requantize32>::value) {
            return get_ktotal(args);
        }

        if (args._cfg && args._cfg->inner_block_size) {
            return roundup(args._cfg->inner_block_size, strategy::k_unroll());
        }

        // Experimental data suggests an optimal block size of 512 for FP32 (scaling accordingly for other
        // datatypes); but don't divide into blocks until we hit 1.5X this size.
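        // Illustrative example (assumed values): for FP32, target_block_size = 2048/4 = 512.  With ktotal = 1280
        // (> 1.5 * 512), target_blocks = ceil(1280/512) = 3 and block_size = ceil(1280/3) = 427, which is then
        // rounded up to a multiple of k_unroll().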
        unsigned int target_block_size = 2048 / sizeof(To);
        auto ktotal = get_ktotal(args);

        if (ktotal > ((target_block_size*3)/2)) {
            unsigned int target_blocks = iceildiv(ktotal, target_block_size);

            unsigned int block_size = iceildiv(ktotal, target_blocks);

            block_size = roundup(block_size, strategy::k_unroll());

            return block_size;
        }

        return ktotal;
    }

    // New N blocking strategy: if it's narrow, or much taller than it is wide, do the full width.  Otherwise do a
    // single block.
    static unsigned int compute_n_block(const GemmArgs &args, const OutputStage os = {}) {
        if (args._cfg && args._cfg->outer_block_size) {
            return args._cfg->outer_block_size;
        }

        if (args._Nsize <= 64) {
            return args._Nsize;
        }

        if ((args._Msize / args._Nsize) > 155) {
            return args._Nsize;
        }

        // "Asymmetric" quantizing GEMMs require a different approach - the tall skinny blocks we would otherwise
        // use imply a great deal of repeated work performing the row sums.  If row sums are involved, work out how
        // much "column" parallelism is going to be required and set the block size accordingly.
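        // Illustrative example (assumed values): with maxthreads = 16, nmulti = nbatches = 1, Msize = 32 and
        // out_height() = 8, multi_row_parallelism = 4 < 16, so columns_needed = 4 and the N block becomes
        // roundup(Nsize / 4, out_width()).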
        if (std::is_same<OutputStage, Requantize32>::value) {
            const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);

            // Row sums only needed if b_offset isn't 0
            if (qp->b_offset != 0) {
                // We can already parallelize across batches, multis and rows (in units of 'out_height')
                int multi_row_parallelism = args._nmulti * args._nbatches * iceildiv(args._Msize, strategy::out_height());

                // If this isn't enough, we will need to split up the columns too.
                if (multi_row_parallelism < args._maxthreads) {
                    unsigned int columns_needed = iceildiv(args._maxthreads, multi_row_parallelism);

                    unsigned int n_block = iceildiv(args._Nsize, columns_needed);

                    return roundup(n_block, strategy::out_width());
                }

                // Multi/Batch/Row parallelism is enough - don't split up the columns.
                return args._Nsize;
            }
        }

        if (args._Ksize <= 128 && args._maxthreads <= 16) {
            return strategy::out_width() * 3;
        }

        return strategy::out_width();
    }

public:
    GemmHybridIndirect(GemmHybridIndirect &) = delete;
    GemmHybridIndirect & operator= (GemmHybridIndirect &) = delete;

    /* Constructor */
    GemmHybridIndirect(const GemmArgs &args, const OutputStage &os)
        : _args(args), _os(os), _Ktotal(get_ktotal(args)),
          _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
          _k_block(compute_k_block(args)), _n_block(compute_n_block(args, os)),
          _Mround(roundup(args._Msize, strategy::out_height())),
          _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
                        iceildiv(args._Nsize, _n_block), args._nmulti)
    {
        // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
        // GemmConfig.  Clear out the pointer to avoid accidents.
        _args._cfg = nullptr;
    }

    /* Constructor without OutputStage */
    GemmHybridIndirect(const GemmArgs &args)
        : _args(args), _Ktotal(get_ktotal(args)),
          _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
          _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
          _Mround(roundup(args._Msize, strategy::out_height())),
          _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
                        iceildiv(args._Nsize, _n_block), args._nmulti)
    {
        // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
        // GemmConfig.  Clear out the pointer to avoid accidents.
        _args._cfg = nullptr;
    }

    // Interface implementation - Compulsory functions
    ndrange_t get_window_size() const override {
        return { _window_range.total_size() };
    }

    // This kernel can always be dynamically scheduled.
    bool supports_dynamic_scheduling() const override {
        return true;
    }

    // Execute
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif
        strategy strat(_args._ci);

        std::vector<const To *> in_row_ptrs;
        std::vector<const To * const *> in_row_strings;
        std::vector<unsigned int> string_lengths;

        // In convolution mode, we need input pointers.
        if (_convolver) {
            in_row_ptrs = std::vector<const To *>(strategy::out_height() * _args._Ksections, nullptr);
            in_row_strings = std::vector<const To * const *>(_args._Ksections, nullptr);

            for (unsigned int i=0; i<_args._Ksections; i++) {
                in_row_strings[i] = &(in_row_ptrs[i * strategy::out_height()]);
            }
        }

        // In any indirect mode, we need the string lengths.
        if (_args._indirect_input) {
            string_lengths = std::vector<unsigned int>(_args._Ksections, 0);
        }

        /* Make sure we've been set up correctly. */
        assert(_B_transposed);
        static_assert(std::is_same<To, Tloi>::value, "gemm_native: Operand types must be the same.");
//        static_assert(std::is_same<Tr, Tri>::value, "gemm_native: Result types must be the same.");

        /* For now, each work item implies all the K for a given output
         * pixel (so we don't need to synchronize access to the output
         * array).  So separate the loop over K blocks here. */
        for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
            unsigned int kmax   = std::min(k0 + _k_block, _Ktotal);
            unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());

            const bool first_pass = (k0 == 0);
            const bool last_pass  = (kmax == _Ktotal);

            unsigned int first_section = (k0 / _rounded_Ksize);
            unsigned int first_offset  = (k0 % _rounded_Ksize);
            unsigned int kleft         = kern_k;
            unsigned int sections      = 0;
            unsigned int offset        = first_offset;

            if (_args._indirect_input) {
                while (kleft) {
                    // When chopping into sections: the amount that goes into 'string_lengths' is the amount to be
                    // processed (excluding padding).  But the amount we subtract from 'kleft' takes account of any
                    // padding applied.
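                    // Illustrative example (assumed values): with _args._Ksize == 6 and k_unroll() == 4,
                    // _rounded_Ksize == 8; the first section contributes 6 to 'string_lengths' but consumes
                    // 8 from 'kleft'.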
                    string_lengths[sections] = std::min(kleft, _args._Ksize - offset);
                    kleft -= std::min(kleft, _rounded_Ksize - offset);
                    sections++;
                    offset=0;
                }
            }

            auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));

            if (p.done()) {
                return;
            }

            // Process rows either 'out_height' rows at a time, or do all valid rows at once with a single kernel call.
            // The separate quantizer path only handles one block of rows at a time (as it has to store sums and intermediate results).
            // The convolution path only generates the pointers for one block of rows at a time.
            const bool process_all_rows = (!SeparateQuantize && !_convolver);

            do {
                const unsigned int m_start = p.dim(0) * strategy::out_height();
                const unsigned int m_end   = process_all_rows ? std::min(p.dim0_max() * strategy::out_height(), _args._Msize) : std::min(m_start + strategy::out_height(), _args._Msize);
//                const unsigned int m_end = std::min(m_start + strategy::out_height(), _args._Msize);
                const unsigned int batch   = p.dim(1);
                const unsigned int n0      = p.dim(2) * _n_block;
                const unsigned int nmax    = std::min(n0 + _n_block, _args._Nsize);
                const unsigned int multi   = p.dim(3);

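                // Locate the pretransposed B panel for this (multi, k block, N block): each multi's data occupies
                // roundup(Nsize, out_width()) * Ktotal elements, the preceding k blocks account for
                // k0 * roundup(Nsize, out_width()) of those, and within this k block each column accounts for
                // kern_k elements, so the N block starting at column n0 begins at n0 * kern_k.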
                const Troi *b_panel = _B_transposed +
                                      (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) +
                                      (k0 * roundup(_args._Nsize, strategy::out_width())) +
                                      (n0 * kern_k);

                IndirectOutputArg<Tr> out_arg(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc);

#ifdef CYCLE_PROFILING
                auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
#endif
                if (_indirect_buf) {
                    run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
#ifdef CYCLE_PROFILING
                        prof,
#endif
                        strat, sections, string_lengths.data(),
                        IndirectInputArg<To>(_indirect_buf + (multi * _args._nbatches * _args._Ksections) + (batch * _args._Ksections) + first_section, m_start, first_offset),
                        (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
                        (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
                        last_pass ? _args._act : Activation(),
                        !first_pass,
                        // Quantization parameters
                        _os, _col_bias+(multi * _args._Nsize), n0);
                } else if (_convolver) {
                    auto conv_cols = _convolver->process_columns(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride), this->_lda, k0, kmax, _rounded_Ksize);

                    unsigned int pos=0;
                    auto conv_rows = conv_cols.process_rows(m_start, m_end - m_start);

                    while (!conv_rows.finished()) {
                        unsigned int width, conv_offset;

                        assert(pos < sections);

                        std::tie(width, conv_offset) = conv_rows.next_block(&(in_row_ptrs[pos * strategy::out_height()]));

                        if (pos==0) {
                            assert(conv_offset == first_offset);
                        }
                        assert(width == string_lengths[pos]);
                        pos++;
                    }
                    assert(pos == sections);

                    run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
#ifdef CYCLE_PROFILING
                        prof,
#endif
                        strat, sections, string_lengths.data(),
                        IndirectInputArg<To>(in_row_strings.data(), 0, first_offset),
                        (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
                        (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
                        last_pass ? _args._act : Activation(),
                        !first_pass,
                        // Quantization parameters
                        _os, _col_bias+(multi * _args._Nsize), n0);
                } else {
                    // Length to process.  This needs to exclude padding, but 'kmax' potentially includes it.
                    const unsigned int len = (std::min(_args._Ksize, kmax) - k0);

                    run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
#ifdef CYCLE_PROFILING
                        prof,
#endif
                        strat, 1, &len,
                        IndirectInputArg<To>(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + m_start * this->_lda + k0, this->_lda),
                        (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
                        (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
                        last_pass ? _args._act : Activation(),
                        !first_pass,
                        // Quantization parameters
                        _os, _col_bias+(multi * _args._Nsize), n0);
                }
            } while (process_all_rows ? p.next_dim1() : p.next_dim0());
        }
    }

    // Interface implementation - pretransposed
    bool B_is_pretransposed() const override {
        return true;
    }

    bool B_pretranspose_required() const override {
        return (_B_transposed==nullptr);
    }

    size_t get_B_pretransposed_array_size() const override {
        // Start with actual pretransposed buffer...
        size_t size = roundup(_args._Nsize, strategy::out_width()) * _Ktotal * _args._nmulti * sizeof(Troi);

        // Space for result row pointers (not strictly needed any more but retained for indirect output testing)
        size += _args._Msize * _args._nbatches * _args._nmulti * sizeof(const Tr *);

        if (std::is_same<OutputStage, Requantize32>::value) {
            size += get_col_sum_size();
        }

        return size;
    }

    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        if (std::is_same<OutputStage, Requantize32>::value) {
            _col_bias = reinterpret_cast<int32_t *>(in_buffer);

            Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);

            for (unsigned int i=0; i<_args._nmulti; i++) {
                // The input is assumed not to have any padding between sections, so a straightforward Ksize * Ksections computation gets the total size.
                compute_col_sums(*qp_ptr, _args._Nsize, _args._Ksize * _args._Ksections, B + (i * B_multi_stride), ldb, _col_bias + (i * _args._Nsize), _args._Ksize * _args._Ksections, i, 0);
            }
        }
    }

    void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        requantize_bias(in_buffer, B, ldb, B_multi_stride);

        // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        Troi *buffer = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
        _B_transposed = buffer;

        strategy strat(_args._ci);

        for (unsigned int multi=0; multi<_args._nmulti; multi++) {
            for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
                const unsigned int kmax=std::min(k0 + _k_block, _Ktotal);

                /* Figure out the size of each block. */
                unsigned int k_size = kmax - k0;

                if (_args._Ksections > 1) {
                    // We need to insert padding at the end of each K section.
                    // The computation needed is a little delicate - the coordinates from the block walker are expressed in
                    // terms of the full, padded, _Ktotal.
                    // But we need to transform each section with reference to the original, unpadded, input, letting the
                    // transform pad each section as needed.
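                    // Illustrative example (assumed values): with _args._Ksize == 6 and k_unroll() == 4,
                    // rounded_section_size == 8, so a block starting at kpos == 8 maps to k_section_base == 1 and
                    // k_offset == 0, i.e. row 6 of the untransformed input.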

                    // This is needed for computations below.
                    const unsigned int rounded_section_size = roundup(_args._Ksize, strategy::k_unroll());

                    // The expected output format is an entire block of <out_width> columns interleaved, then the next
                    // block of columns, and so on.  This means, as we are breaking it up vertically, we have to do it
                    // one block of columns at a time.
                    for (unsigned int x0=0; x0 < _args._Nsize; x0 += strategy::out_width() ){
                        unsigned int xmax = std::min(x0 + strategy::out_width(), _args._Nsize);

                        // Track where we are and how much work is left.
                        unsigned int kpos  = k0;
                        unsigned int kleft = k_size;

                        while (kleft) {
                            // Which section are we in?  Based on the rounded-up section size.
                            unsigned int k_section_base = kpos / rounded_section_size;
                            // How far into the section are we?
                            unsigned int k_offset = kpos - (k_section_base * rounded_section_size);

                            // We will either copy the rest of this section, or to the end of the requested length.
                            unsigned int k_length = std::min(_args._Ksize - k_offset, kleft);

                            strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
                                                      x0, xmax,
                                                      (k_section_base * _args._Ksize) + k_offset,               // K starting point - compute row to read based on our section and the true section length.
                                                      (k_section_base * _args._Ksize) + k_offset + k_length);   // K end point - starting point plus length computed above.

                            // We need to modify our position based on the ROUNDED version of what we just did.
                            unsigned int padded_length = roundup(k_length, strategy::k_unroll());

                            buffer += strategy::out_width() * padded_length;

                            kpos  += padded_length;
                            kleft -= padded_length;
                        }
                    }
                } else {
                    // In the single K section case, can process the whole lot in one go.
                    // Caution: 'blockwalker::kmax()' rounds up, so clamp to valid _Ksize.
                    strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
                                              0, _args._Nsize, k0, std::min(kmax, _args._Ksize));
                    buffer += roundup(_args._Nsize, strategy::out_width()) * roundup(kmax-k0, strategy::k_unroll());
                }
            }
        }
    }

    void set_pretransposed_B_data(void *in_buffer) override {
        // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        _B_transposed = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
        _col_bias = reinterpret_cast<int32_t *>(in_buffer);
    }

    // Estimate cycles for the given problem, using the provided performance parameters.
    // "perf_type" is a type to pass along to get_performance_parameters to get the right set of performance
    // parameters - it's arbitrary but usually either the input or output type.
    template <typename perf_type>
    static uint64_t estimate_cycles(const GemmArgs &args, const OutputStage &os = {}) {
        const PerformanceParameters params = strategy::template get_performance_parameters<perf_type>(args._ci);

        // Note: Current hybrid kernels don't actually round up height (they
        // have paths for each possible height).  Might need to make this
        // configurable in future.
        uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * roundup(args._Nsize, strategy::out_width()) * get_ktotal(args);

        float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;

        // TODO: A bit of a kludge here: current hybrid kernels incur extra
        // overhead where the width is not a multiple of kernel width.  It's
        // most noticeable where the overall width is quite low, so add 15%
        // penalty for such widths.
        if ((args._Nsize < strategy::out_width()) || (args._Nsize > strategy::out_width() && args._Nsize < 2*strategy::out_width())) {
            mac_cycles *= 1.15f;
        }

        uint64_t total_cycles = mac_cycles;

        // Quantizing kernels with separate quantize need to add in the extra stages.
        if (std::is_same<OutputStage, Requantize32>::value && SeparateQuantize) {
            const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);

            // Row sums: need to consider each value in A (batch * multi * M * K)...
            uint64_t rowsum_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * get_ktotal(args);

            // ... but row sums are skipped if B offset==0.
            if (qp->b_offset == 0) {
                rowsum_bytes = 0;
            }

            // Use "prepare bytes per cycle" to store "row sum values per cycle".
            float rowsum_cycles = static_cast<float>(rowsum_bytes) / params.prepare_bytes_cycle;

            // Requantize: need to consider each value in C (batch * multi * M * N)
            uint64_t requantize_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * args._Nsize;

            // Use "merge bytes per cycle" to store "requantize values per cycle".
            float requantize_cycles = static_cast<float>(requantize_bytes) / params.merge_bytes_cycle;

            // Recalculate total_cycles with the extra components.
            total_cycles = mac_cycles + rowsum_cycles + requantize_cycles;
        }

        return total_cycles;
    }

    void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
        if (std::is_same<OutputStage, Requantize32>::value) {
            Requantize32 *qp = reinterpret_cast<Requantize32 *>(&_os);

            qp->bias = bias;
            qp->bias_multi_stride = bias_multi_stride;
        }
    }

    void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
        assert(string_len == _args._Ksize);
        _indirect_buf = ptr;
    }

    void set_convolution_parameters(ConvolutionParameters parms) override {
        assert(parms.input_channels == _args._Ksize);
        _convolver = std::unique_ptr<convolver<To>>(new convolver<To>(parms));
    }

    GemmConfig get_config() override {
        GemmConfig c;

        c.method = GemmMethod::GEMM_HYBRID;
        c.inner_block_size = _k_block;
        c.outer_block_size = _n_block;
        c.filter = get_type_name<strategy>();

        return c;
    }
};

} // namespace arm_gemm

#ifdef __I_DEFINED_UNUSED
#undef UNUSED
#endif