/*
 * Copyright (c) 2017-2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#if !defined(_WIN64) && !defined(__OpenBSD__)
#include <alloca.h>
#endif /* !defined(_WIN64) && !defined(__OpenBSD__) */

#include <algorithm>
#include <cassert>

#include "arm_gemm.hpp"
#include "bias_adder.hpp"
#include "convolver.hpp"
#include "kernel_weight_format.hpp"
#include "ndrange.hpp"
#include "performance_parameters.hpp"
#include "transform.hpp"
#include "utils.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

#ifndef UNUSED
#define __I_DEFINED_UNUSED
#define UNUSED(x) ((void)(x))
#endif

namespace arm_gemm {

namespace {

// We need to invoke the kernel differently for quantizing and non-quantizing cases, so here is a shim class to do
// that.
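// The specializations below cover: no output stage (Nothing) with either a pretransposed B panel or a
// fixed-format (strided) B matrix, and Requantize32 either fused into the kernel call or run as a
// separate pass over an intermediate result buffer.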

template<typename OutputStage, bool SeparateQuantize, bool FixedFormat>
class run_hybrid_kernel {
public:
    template<typename strategy, typename Tlo, typename Tro, typename Tr>
    static inline void run (
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
        const OutputStage &os, const int32_t *col_bias, unsigned int n_0 );
};

template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
inline void run_hybrid_kernel<Nothing, false, false>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
        const Nothing &, const int32_t *, unsigned int) {
#ifdef CYCLE_PROFILING
    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
#endif
    UNUSED(kern_k);

    /* Indirect hybrid kernels read the full width of the bias. So we need to detect the case where we are writing
     * a partial block and pad the bias for that block. */
    if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
        /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
        unsigned int N_remainder = N % strategy::out_width();
        unsigned int N_bulk = N - N_remainder;

        /* Output argument to be used for the tail */
        IndirectOutputArg<Tr> offset_output = output_arg;

        /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
        if (N_bulk > 0) {
            strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, output_arg, bias_ptr, act, accumulate);

            if (output_arg.is_indirect) {
                offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
            } else {
                offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
            }
        }

        /* Pad the bias buffer for the remainder */
        Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
        memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));

        /* Process the remainder, offsetting the B pointer as needed. */
        strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder, b_ptr + (N_bulk * kern_k), offset_output, bias_pad_buffer, act, accumulate);
    } else {
        strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, bias_ptr, act, accumulate);
    }
}

template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
inline void run_hybrid_kernel<Nothing, false, true>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
        const Nothing &, const int32_t *, unsigned int) {
#ifdef CYCLE_PROFILING
    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
#endif
    UNUSED(kern_k);

    /* Indirect hybrid kernels read the full width of the bias. So we need to detect the case where we are writing
     * a partial block and pad the bias for that block. */
    if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
        /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
        unsigned int N_remainder = N % strategy::out_width();
        unsigned int N_bulk = N - N_remainder;

        /* Output argument to be used for the tail */
        IndirectOutputArg<Tr> offset_output = output_arg;

        /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
        if (N_bulk > 0) {
            strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);

            if (output_arg.is_indirect) {
                offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
            } else {
                offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
            }
        }

        /* Pad the bias buffer for the remainder */
        Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
        memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));

        /* Process the remainder, offsetting the B pointer as needed. */
        strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder,
                     b_ptr + (N_bulk / strategy::stripe_width()) * b_stride, b_stride, offset_output,
                     bias_pad_buffer, act, accumulate);
    } else {
        strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);
    }
}

template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
inline void run_hybrid_kernel<Requantize32, false, false>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
        const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
#ifdef CYCLE_PROFILING
    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
#endif
    UNUSED(kern_k);

    strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, &os, col_bias + n_0, n_0);
}

template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
inline void run_hybrid_kernel<Requantize32, true, false>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
        unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
        const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
    UNUSED(kern_k);
    // On this route we will only process one kernel height at a time and will make sure this happens in the driver loop.
    assert(M <= strategy::out_height());
    // We don't yet support indirect output (as the quantizer can't do it).
    assert(output_arg.is_indirect == false);

    // We need a row sum buffer and intermediate output buffer.
    // These go on the stack as they are not too large, using an automatic array and alloca() respectively.
    int32_t row_sums[strategy::out_height()];
    typename strategy::result_type *result_buffer;

    unsigned int output_width = roundup(N, strategy::out_width());

    result_buffer = reinterpret_cast<typename strategy::result_type *>(alloca(output_width * strategy::out_height() * sizeof(typename strategy::result_type)));

    {
#ifdef CYCLE_PROFILING
        auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
#endif
        // Perform the GEMM, into the output buffer.
        strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, IndirectOutputArg<typename strategy::result_type>(result_buffer, output_width), nullptr, Activation(), false);
    }

    if (os.b_offset != 0) {
#ifdef CYCLE_PROFILING
        auto p = prof.ScopedProfiler(PROFILE_ROWSUMS, (unsigned long)M * kern_k);
#endif
        row_sums_indirect(num_strings, string_ptr, A_arg, M, row_sums, &os);
    } else {
        memset(row_sums, 0, sizeof(int32_t) * strategy::out_height());
    }

    {
#ifdef CYCLE_PROFILING
        auto p = prof.ScopedProfiler(PROFILE_QUANTIZE, (unsigned long)M * N);
#endif
        // Quantize
        requantize_block_32(os, N, M, result_buffer, output_width, output_arg.direct.base, output_arg.direct.stride, row_sums, col_bias + n_0, n_0);
    }
}

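// Helper traits: for fixed-format strategies these forward to the strategy's stripe width and kernel
// weight format; the FixedFormat==false specializations return neutral values (0 / NON_FIXED) so the
// shared driver code below compiles either way.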
template<typename strategy, bool FixedFormat>
struct stripe_width {
    static unsigned int get() {
        return strategy::stripe_width();
    }
};

template<typename strategy>
struct stripe_width<strategy, false> {
    static unsigned int get() {
        return 0;
    }
};

template<typename strategy, bool FixedFormat>
struct kernel_weight_format {
    static KernelWeightFormat get() {
        return strategy::kernel_weight_format();
    }
};

template<typename strategy>
struct kernel_weight_format<strategy, false> {
    static KernelWeightFormat get() {
        return KernelWeightFormat::NON_FIXED;
    }
};

} // anonymous namespace

// Implementation of the GemmCommon abstract class.
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool SeparateQuantize=false, bool FixedFormat=false>
class GemmHybridIndirect : public GemmCommon<To, Tr> {
    typedef typename strategy::lhs_operand_type Tloi;
    typedef typename strategy::rhs_operand_type Troi;
    typedef typename strategy::result_type Tri;

    GemmArgs    _args;
    OutputStage _os = {};

    /* Quantized support (in addition to 'output stage' above) */
    int32_t *_col_bias = nullptr;

    const unsigned int _Ktotal;
    const unsigned int _rounded_Ksize;

    /* Blocking info */
    const unsigned int _k_block;
    const unsigned int _n_block;
    const unsigned int _Mround;

    /* Pretransposed buffer. */
    const Troi *_B_transposed = nullptr;

    /* Indirect parameters. _indirect_buf doubles as a flag to indicate that "indirect" transform should be used. */
    const To * const * const * _indirect_buf = nullptr;

    /* Convolver - only set up for convolution problems, so also doubles as a flag. */
    std::unique_ptr<convolver<To>> _convolver = nullptr;

    // Array of pointers to output rows
//    Tr * const * _output_ptrs;

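    // 4D scheduling window: M blocks (units of out_height()) x batches x N blocks (units of _n_block) x multis.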
    const NDRange<4> _window_range;

    unsigned int get_col_sum_size() const {
        if (std::is_same<OutputStage, Requantize32>::value) {
            return _args._Nsize * _args._nmulti * sizeof(int32_t);
        } else {
            return 0;
        }
    }

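    // Total K as seen by the kernel: each of the _Ksections is padded up to a multiple of k_unroll().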
    static unsigned int get_ktotal(const GemmArgs &args) {
        return args._Ksections * roundup(args._Ksize, strategy::k_unroll());
    }

    static unsigned int compute_k_block(const GemmArgs &args) {
        // Some kernels don't support accumulate mode - these can't do K blocking at all.
        if (!strategy::supports_accumulate() || std::is_same<OutputStage, Requantize32>::value) {
            return get_ktotal(args);
        }

        if (args._cfg && args._cfg->inner_block_size) {
            return roundup(args._cfg->inner_block_size, strategy::k_unroll());
        }

        // Experimental data suggests an optimal block size of 512 for FP32 (scaling accordingly for other
        // datatypes); but don't divide into blocks until we hit 1.5X this size.
        unsigned int target_block_size = 2048 / sizeof(To);
        auto ktotal = get_ktotal(args);

        if (ktotal > ((target_block_size*3)/2)) {
            unsigned int target_blocks = iceildiv(ktotal, target_block_size);

            unsigned int block_size = iceildiv(ktotal, target_blocks);

            block_size = roundup(block_size, strategy::k_unroll());

            return block_size;
        }

        return ktotal;
    }
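
    // Illustrative example of the heuristic above: with FP32 operands sizeof(To)==4, so target_block_size
    // is 512.  A problem with ktotal==1280 exceeds 1.5x that (768), so it is split into
    // iceildiv(1280, 512)==3 blocks of iceildiv(1280, 3)==427 each, rounded up to a multiple of k_unroll().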

    // New N blocking strategy: if it's narrow, or much taller than it is wide, do the full width. Otherwise do a
    // single block.
    static unsigned int compute_n_block(const GemmArgs &args, const OutputStage os = {}) {
        if (args._cfg && args._cfg->outer_block_size) {
            return args._cfg->outer_block_size;
        }

        if (args._Nsize <= 64) {
            return args._Nsize;
        }

        if ((args._Msize / args._Nsize) > 155) {
            return args._Nsize;
        }

        // "Asymmetric" quantizing GEMMs require a different approach - the tall skinny blocks we would otherwise
        // use imply a great deal of repeated work performing the row sums. If row sums are involved, work out how
        // much "column" parallelism is going to be required and set the block size accordingly.
        if (std::is_same<OutputStage, Requantize32>::value) {
            const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);

            // Row sums only needed if b_offset isn't 0
            if (qp->b_offset != 0) {
                // We can already parallelize across batches, multis and rows (in units of 'out_height')
                int multi_row_parallelism = args._nmulti * args._nbatches * iceildiv(args._Msize, strategy::out_height());

                // If this isn't enough, we will need to split up the columns too.
                if (multi_row_parallelism < args._maxthreads) {
                    unsigned int columns_needed = iceildiv(args._maxthreads, multi_row_parallelism);

                    unsigned int n_block = iceildiv(args._Nsize, columns_needed);

                    return roundup(n_block, strategy::out_width());
                }

                // Multi/Batch/Row parallelism is enough - don't split up the columns.
                return args._Nsize;
            }
        }

        if (args._Ksize <= 128 && args._maxthreads <= 16) {
            return strategy::out_width() * 3;
        }

        return strategy::out_width();
    }
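
    // Illustrative example: a 256x256 FP32 problem with Ksize==512 on an 8-thread machine, with no explicit
    // GemmConfig, falls through all of the special cases above and uses a single out_width() column block per step.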

public:
    GemmHybridIndirect(GemmHybridIndirect &) = delete;
    GemmHybridIndirect & operator= (GemmHybridIndirect &) = delete;

    /* Constructor */
    GemmHybridIndirect(const GemmArgs &args, const OutputStage &os)
        : _args(args), _os(os), _Ktotal(get_ktotal(args)),
          _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
          _k_block(compute_k_block(args)), _n_block(compute_n_block(args, os)),
          _Mround(roundup(args._Msize, strategy::out_height())),
          _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
                        iceildiv(args._Nsize, _n_block), args._nmulti)
    {
        // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
        // GemmConfig. Clear out the pointer to avoid accidents.
        _args._cfg = nullptr;
    }

    /* Constructor without OutputStage */
    GemmHybridIndirect(const GemmArgs &args)
        : _args(args), _Ktotal(get_ktotal(args)),
          _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
          _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
          _Mround(roundup(args._Msize, strategy::out_height())),
          _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
                        iceildiv(args._Nsize, _n_block), args._nmulti)
    {
        // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
        // GemmConfig. Clear out the pointer to avoid accidents.
        _args._cfg = nullptr;
    }

    // Interface implementation - Compulsory functions
    ndrange_t get_window_size() const override {
        return { _window_range.total_size() };
    }

    // This kernel can always be dynamically scheduled.
    bool supports_dynamic_scheduling() const override {
        return true;
    }

    // Execute
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif
        strategy strat(_args._ci);

        std::vector<const To *> in_row_ptrs;
        std::vector<const To * const *> in_row_strings;
        std::vector<unsigned int> string_lengths;

        // In convolution mode, we need input pointers.
        if (_convolver) {
            in_row_ptrs = std::vector<const To *>(strategy::out_height() * _args._Ksections, nullptr);
            in_row_strings = std::vector<const To * const *>(_args._Ksections, nullptr);

            for (unsigned int i=0; i<_args._Ksections; i++) {
                in_row_strings[i] = &(in_row_ptrs.data()[i * strategy::out_height()]);
            }
        }

        // In any indirect mode, we need the string lengths.
        if (_args._indirect_input) {
            string_lengths = std::vector<unsigned int>(_args._Ksections, 0);
        }

        /* Make sure we've been set up correctly. */
        assert(FixedFormat || _B_transposed);
        static_assert(std::is_same<To, Tloi>::value, "gemm_native: Operand types must be the same.");
//        static_assert(std::is_same<Tr, Tri>::value, "gemm_native: Result types must be the same.");

        /* For now, each work item implies all the K for a given output
         * pixel (so we don't need to synchronize access to the output
         * array). So separate the loop over K blocks here. */
        for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
            unsigned int kmax   = std::min(k0 + _k_block, _Ktotal);
            unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());

            const bool first_pass = (k0 == 0);
            const bool last_pass = (kmax == _Ktotal);

            unsigned int first_section = (k0 / _rounded_Ksize);
            unsigned int first_offset = (k0 % _rounded_Ksize);
            unsigned int kleft = kern_k;
            unsigned int sections=0;
            unsigned int offset = first_offset;

            if (_args._indirect_input) {
                while (kleft) {
                    // When chopping into sections: the amount that goes into 'string_lengths' is the amount to be
                    // processed (excluding padding). But the amount we subtract from 'kleft' takes account of any
                    // padding applied.
                    string_lengths[sections] = std::min(kleft, _args._Ksize - offset);
                    kleft -= std::min(kleft, _rounded_Ksize - offset);
                    sections++;
                    offset=0;
                }
            }

            auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));

            if (p.done()) {
                return;
            }

            // Process rows either 'out_height' rows at a time, or do all valid rows at once with a single kernel call.
            // The separate quantizer path only handles one block of rows at a time (as it has to store sums and intermediate results).
            // The convolution path only generates the pointers for one block of rows at a time.
            const bool process_all_rows = (!SeparateQuantize && !_convolver);

            do {
                const unsigned int m_start = p.dim(0) * strategy::out_height();
                const unsigned int m_end = process_all_rows ? std::min(p.dim0_max() * strategy::out_height(), _args._Msize) : std::min(m_start + strategy::out_height(), _args._Msize);
//                const unsigned int m_end = std::min(m_start + strategy::out_height(), _args._Msize);
                const unsigned int batch = p.dim(1);
                const unsigned int n0 = p.dim(2) * _n_block;
                const unsigned int nmax = std::min(n0 + _n_block, _args._Nsize);
                const unsigned int multi = p.dim(3);

                const Troi *b_panel;
                if (FixedFormat) {
                    b_panel = reinterpret_cast<const Troi *>(this->_Bptr) +
                              (multi * this->_B_multi_stride) +
                              ((n0 / stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
                              (k0 * stripe_width<strategy, FixedFormat>::get());
                } else {
                    b_panel = _B_transposed +
                              (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) +
                              (k0 * roundup(_args._Nsize, strategy::out_width())) +
                              (n0 * kern_k);
                }

                IndirectOutputArg<Tr> out_arg(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc);

#ifdef CYCLE_PROFILING
                auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
#endif
                if (_indirect_buf) {
                    run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
#ifdef CYCLE_PROFILING
                        prof,
#endif
                        strat, sections, string_lengths.data(),
                        IndirectInputArg<To>(_indirect_buf + (multi * _args._nbatches * _args._Ksections) + (batch * _args._Ksections) + first_section, m_start, first_offset),
                        (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
                        (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
                        last_pass ? _args._act : Activation(),
                        !first_pass,
                        // Quantization parameters
                        _os, _col_bias+(multi * _args._Nsize), n0);
                } else if (_convolver) {
                    auto conv_cols = _convolver->process_columns(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride), this->_lda, k0, kmax, _rounded_Ksize);

                    unsigned int pos=0;
                    auto conv_rows = conv_cols.process_rows(m_start, m_end - m_start);

                    while (!conv_rows.finished()) {
                        unsigned int width, conv_offset;

                        assert(pos < sections);

                        std::tie(width, conv_offset) = conv_rows.next_block(&(in_row_ptrs[pos * strategy::out_height()]));

                        if (pos==0) {
                            assert(conv_offset == first_offset);
                        }
                        assert(width == string_lengths[pos]);
                        pos++;
                    }
                    assert(pos == sections);

                    run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
#ifdef CYCLE_PROFILING
                        prof,
#endif
                        strat, sections, string_lengths.data(),
                        IndirectInputArg<To>(in_row_strings.data(), 0, first_offset),
                        (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
                        (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
                        last_pass ? _args._act : Activation(),
                        !first_pass,
                        // Quantization parameters
                        _os, _col_bias+(multi * _args._Nsize), n0);
                } else {
                    // Length to process. This needs to exclude padding, but 'kmax' potentially includes it.
                    const unsigned int len = (std::min(_args._Ksize, kmax) - k0);

                    run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
#ifdef CYCLE_PROFILING
                        prof,
#endif
                        strat, 1, &len,
                        IndirectInputArg<To>(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + m_start * this->_lda + k0, this->_lda),
                        (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
                        (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
                        last_pass ? _args._act : Activation(),
                        !first_pass,
                        // Quantization parameters
                        _os, _col_bias+(multi * _args._Nsize), n0);
                }
            } while (process_all_rows ? p.next_dim1() : p.next_dim0());
        }
    }

    // Interface implementation - pretransposed
    bool B_is_pretransposed() const override {
        return (FixedFormat == false);
    }

    bool B_pretranspose_required() const override {
        return (FixedFormat == false) && (_B_transposed==nullptr);
    }

    size_t get_B_pretransposed_array_size() const override {
        if (FixedFormat) {
            return 0;
        }

        // Start with actual pretransposed buffer...
        size_t size = roundup(_args._Nsize, strategy::out_width()) * _Ktotal * _args._nmulti * sizeof(Troi);

        // Space for result row pointers (not strictly needed any more but retained for indirect output testing)
        size += _args._Msize * _args._nbatches * _args._nmulti * sizeof(const Tr *);

        if (std::is_same<OutputStage, Requantize32>::value) {
            size += get_col_sum_size();
        }

        return size;
    }

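    // The pretranspose "window" is expressed in units of out_width()-wide column blocks, per multi;
    // pretranspose_B_array_part() below converts a [start, end) sub-range of that window into a
    // column range within each multi.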
    size_t get_B_pretranspose_window_size() const override {
        return _args._nmulti * iceildiv(_args._Nsize, strategy::out_width());
    }

    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        if (std::is_same<OutputStage, Requantize32>::value) {
            _col_bias = reinterpret_cast<int32_t *>(in_buffer);

            Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);

            for (unsigned int i=0; i<_args._nmulti; i++) {
                // The input is assumed not to have any padding between sections, so straightforward Ksize * Ksections computation gets the total size.
                compute_col_sums(*qp_ptr, _args._Nsize, _args._Ksize * _args._Ksections, B + (i * B_multi_stride), ldb, _col_bias + (i * _args._Nsize), _args._Ksize * _args._Ksections, i, 0);
            }
        }
    }

    bool B_pretranspose_supports_transpose() const override {
        strategy strat(_args._ci);
        return strat.transforms.PrepareB_supports_transpose();
    }

    void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride, bool transposed) override {
        pretranspose_B_array_part(in_buffer, B, ldb, B_multi_stride, transposed, 0, get_B_pretranspose_window_size());
    }

    void pretranspose_B_array_part(void *in_buffer, const To *B, const int ldb, const int B_multi_stride, bool transposed, size_t start, size_t end) override {
        if (end >= get_B_pretranspose_window_size()) {
            requantize_bias(in_buffer, B, ldb, B_multi_stride);
        }

        // Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        Troi *buffer_base = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
        _B_transposed = buffer_base;

        strategy strat(_args._ci);
        size_t work_per_multi = iceildiv(_args._Nsize, strategy::out_width());

        for (unsigned int multi=(start / work_per_multi); multi<_args._nmulti; multi++) {
            // Work out which part of the window space this multi occupies,
            // skip to the next multi or exit as needed.
            size_t wk_start = multi * work_per_multi;
            size_t wk_end = (multi + 1) * work_per_multi;

            assert(wk_end > start);

            if (wk_start >= end) {
                break;
            }

            for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
                const unsigned int kmax=std::min(k0 + _k_block, _Ktotal);

                /* Figure out the size of each block. */
                unsigned int k_size = kmax - k0;

                // Correct the N range and buffer base if we are not processing the whole block.
                size_t n_start = 0;
                size_t n_end = _args._Nsize;

                // If we are not doing the first columns, update the buffer write position and starting N value.
                if (start > wk_start) {
                    n_start = (start - wk_start) * strategy::out_width();
                }

                // If we are not doing the last items, update the final N value.
                if (end < wk_end) {
                    n_end = (end - wk_start) * strategy::out_width();
                }

                // Set the buffer pointer
                Troi *buffer = buffer_base +
                               (roundup(_args._Nsize, strategy::out_width()) * (multi * _Ktotal + k0)) +
                               (n_start * roundup(k_size, strategy::k_unroll()));

                if (_args._Ksections > 1) {
                    // We need to insert padding at the end of each K section.
                    // The computation needed is a little delicate - the k0/kmax coordinates are expressed in
                    // terms of the full, padded, _Ktotal.
                    // But we need to transform each section with reference to the original, unpadded, input, letting the
                    // transform pad each section as needed.

                    // This is needed for computations below.
                    const unsigned int rounded_section_size = roundup(_args._Ksize, strategy::k_unroll());

                    // The expected output format is also an entire <out_width> columns interleaved, then the next set of
                    // columns, and so on. This means, as we are breaking it up vertically, we have to do it one column at
                    // a time.
                    for (unsigned int x0 = n_start; x0 < n_end; x0 += strategy::out_width()) {
                        unsigned int xmax = std::min(x0 + strategy::out_width(), _args._Nsize);

                        // Track where we are and how much work is left.
                        unsigned int kpos = k0;
                        unsigned int kleft = k_size;

                        while (kleft) {
                            // Which section are we in? Based on the rounded-up section size.
                            unsigned int k_section_base = kpos / rounded_section_size;
                            // How far into the section are we?
                            unsigned int k_offset = kpos - (k_section_base * rounded_section_size);

                            // We will either copy the rest of this section, or up to the end of the requested length.
                            unsigned int k_length = std::min(_args._Ksize - k_offset, kleft);

                            strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
                                                      x0, xmax,
                                                      (k_section_base * _args._Ksize) + k_offset,            // K starting point - compute row to read based on our section and the true section length.
                                                      (k_section_base * _args._Ksize) + k_offset + k_length, // K end point - starting point plus length computed above.
                                                      transposed);

                            // We need to modify our position based on the ROUNDED version of what we just did.
                            unsigned int padded_length = roundup(k_length, strategy::k_unroll());

                            buffer += strategy::out_width() * padded_length;

                            kpos += padded_length;
                            kleft -= padded_length;
                        }
                    }
                } else {
                    // In the single K section case, can process the whole lot in one go.
                    strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
                                              n_start, n_end, k0, std::min(kmax, _args._Ksize), transposed);
                }
            }
        }
    }

    void set_pretransposed_B_data(void *in_buffer) override {
        // Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        _B_transposed = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
        _col_bias = reinterpret_cast<int32_t *>(in_buffer);
    }

    // Estimate cycles for given problem given provided parameters.
    // "perf_type" is a type to pass along to get_performance_parameters to get the right set of performance
    // parameters - it's arbitrary but usually either the input or output type.
    template <typename perf_type>
    static uint64_t estimate_cycles(const GemmArgs &args, const OutputStage &os = {}) {
        const PerformanceParameters params = strategy::template get_performance_parameters<perf_type>(args._ci);

        // Note: Current hybrid kernels don't actually round up height (they
        // have paths for each possible height). Might need to make this
        // configurable in future.
        uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * roundup(args._Nsize, strategy::out_width()) * get_ktotal(args);

        float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;

        // TODO: A bit of a kludge here: current hybrid kernels incur extra
        // overhead where the width is not a multiple of kernel width. It's
        // most noticeable where the overall width is quite low, so add 15%
        // penalty for such widths.
        if ((args._Nsize < strategy::out_width()) || (args._Nsize > strategy::out_width() && args._Nsize < 2*strategy::out_width())) {
            mac_cycles *= 1.15f;
        }

        uint64_t total_cycles = mac_cycles;

        // Quantizing kernels with separate quantize need to add in the extra stages.
        if (std::is_same<OutputStage, Requantize32>::value && SeparateQuantize) {
            const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);

            // Row sums: need to consider each value in A (batch * multi * M * K)...
            uint64_t rowsum_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * get_ktotal(args);

            // ... but row sums are skipped if B offset==0.
            if (qp->b_offset == 0) {
                rowsum_bytes = 0;
            }

            // Use "prepare bytes per cycle" to store "row sum values per cycle".
            float rowsum_cycles = static_cast<float>(rowsum_bytes) / params.prepare_bytes_cycle;

            // Requantize: need to consider each value in C (batch * multi * M * N)
            uint64_t requantize_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * args._Nsize;

            // Use "merge bytes per cycle" to store "requantize values per cycle".
            float requantize_cycles = static_cast<float>(requantize_bytes) / params.merge_bytes_cycle;

            // Recalculate total_cycles with the extra components.
            total_cycles = mac_cycles + rowsum_cycles + requantize_cycles;
        }

        return total_cycles;
    }

    void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
        if (std::is_same<OutputStage, Requantize32>::value) {
            Requantize32 *qp = reinterpret_cast<Requantize32 *>(&_os);

            qp->bias = bias;
            qp->bias_multi_stride = bias_multi_stride;
        }
    }

    void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
        assert(string_len == _args._Ksize);
        _indirect_buf = ptr;
    }

    void set_convolution_parameters(ConvolutionParameters parms) override {
        assert(parms.input_channels == _args._Ksize);
        _convolver = std::unique_ptr<convolver<To>>(new convolver<To>(parms));
    }

    GemmConfig get_config() override {
        GemmConfig c;

        c.method = GemmMethod::GEMM_HYBRID;
        c.inner_block_size = _k_block;
        c.outer_block_size = _n_block;
        c.filter = get_type_name<strategy>();
        c.weight_format = get_weight_format(kernel_weight_format<strategy, FixedFormat>::get(), sizeof(To));

        return c;
    }
};

template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
using GemmHybridIndirectFixedFormat = GemmHybridIndirect<strategy, To, Tr, OutputStage, false, true>;

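// Illustrative usage sketch (the strategy type name below is hypothetical; real strategy classes are
// provided elsewhere in arm_gemm and are normally selected via the GemmImplementation tables):
//
//   GemmArgs args = ...;   // problem shape, CPU info, threading, activation, etc.
//   auto *gemm = new GemmHybridIndirect<hybrid_fp32_strategy, float, float>(args);
//   // or, for fixed-format (non-pretransposed) weights:
//   auto *ff_gemm = new GemmHybridIndirectFixedFormat<hybrid_fp32_strategy, float, float>(args);
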
} // namespace arm_gemm

#ifdef __I_DEFINED_UNUSED
#undef UNUSED
#endif