/*
 * Copyright (c) 2017-2019 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <cstddef>
#include <cstdint> // for int32_t, used by set_quantized_bias()

#define UNUSED(x) (void)(x)

namespace arm_gemm {

// Abstract class for the GEMM/GEMV functions.
//
// GEMM implementations may be "native" (never require any input
// permutation), "pretransposed" (require permutation up-front) or require
// working space (permute as they go along). This interface should support
// all of them; a combined usage sketch follows the class definition below.

// The real GemmCommon class is templated based on the operand and return
// type. This is an interface class which is independent of those types.
class IGemmCommon {
public:
    /* Pass in the pointers to the arrays to be operated on and their
     * strides. This "generic" version uses void *s; the preferred version
     * is the one provided by the templated GemmCommon (below), which takes
     * appropriately typed pointers. If B is pretransposed (see below) then
     * the settings for B here are ignored.
     */
    virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                                    const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;

    /* For threading, we divide the work into some number of units and work
     * out internally what unit corresponds to what work. This returns the
     * total number of units. */
    virtual unsigned int get_window_size() const = 0;

    /* The maximum thread count is specified when the GEMM is created. Some
     * implementations need to know how many threads will actually run in
     * order to work properly.
     *
     * In some cases, after creating the GEMM the number of threads needs to
     * be reduced (e.g. not enough work to split across threads). This
     * method sets the number of threads that will actually run (which must
     * be equal to or lower than the maximum).
     *
     * This has an empty default implementation, as GEMMs which don't care
     * about thread count can safely ignore this.
     */
    virtual void set_nthreads(int) { }

    /* Whether this GEMM can be dynamically scheduled or not. */
    virtual bool supports_dynamic_scheduling() const { return false; }

    /* Actually do the work. Provide a threadid to index any per-thread
     * buffers, and a start/end range to indicate which work to do. */
    virtual void execute(unsigned int start, unsigned int end, int threadid) = 0;
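
    /* Illustrative sketch (not part of this header): splitting the window
     * across threads. "gemm", "max_threads" and "thread_pool" are
     * hypothetical; only the IGemmCommon calls are real.
     *
     *   const unsigned int window   = gemm->get_window_size();
     *   const int          nthreads = std::min<int>(max_threads, window);
     *   gemm->set_nthreads(nthreads);
     *
     *   for (int t = 0; t < nthreads; t++) {
     *       const unsigned int start = (window * t) / nthreads;
     *       const unsigned int end   = (window * (t + 1)) / nthreads;
     *       // Each thread runs its own sub-range of the window.
     *       thread_pool.run([=]() { gemm->execute(start, end, t); });
     *   }
     */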

    /*** Working space interface (optional) ***/
    /* Total number of bytes of temporary working space needed. If zero, it's not necessary to call set_working_space(). */
    virtual size_t get_working_size() const { return 0; }
    /* Provide working space buffer - the void * passed in must remain allocated for the duration of any execute calls. */
    virtual void set_working_space(void *) { }
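
    /* Illustrative sketch (not part of this header): querying and providing
     * working space before execution. The buffer is caller-owned and must
     * stay allocated for the duration of any execute() calls.
     *
     *   std::vector<uint8_t> workspace(gemm->get_working_size());
     *   if (!workspace.empty()) {
     *       gemm->set_working_space(workspace.data());
     *   }
     */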

    /*** "Pretransposed" interface (optional) ***/
    /* Is this object set up for pretranspose? If so, pretranspose_B_array() needs to be called before execute(). */
    virtual bool B_is_pretransposed() const { return false; }
    /* Does pretranspose still need to be done? */
    virtual bool B_pretranspose_required() const { return false; }
    /* Total number of bytes of space needed for pretransposed arrays. */
    virtual size_t get_B_pretransposed_array_size() const { return 0; }
    /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */
    /* The "real" version of this depends on the templated operand type (see below). */
    virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0;
    /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
    virtual void set_pretransposed_B_data(void *) { }
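
    /* Illustrative sketch of the pretranspose flow, assuming "gemm" is in
     * fact a GemmCommon<To, Tr> so the typed pretranspose_B_array() (defined
     * below) is available; "B", "ldb" and "B_multi_stride" are the caller's
     * untransposed B operand and its strides.
     *
     *   std::vector<uint8_t> transposed_B;
     *   if (gemm->B_is_pretransposed() && gemm->B_pretranspose_required()) {
     *       transposed_B.resize(gemm->get_B_pretransposed_array_size());
     *       gemm->pretranspose_B_array(transposed_B.data(), B, ldb, B_multi_stride);
     *   }
     */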

    /*** "Quantized bias" interface (optional) ***/
    /* Set the bias vector for quantized GEMMs. */
    virtual void set_quantized_bias(const int32_t *bias) { UNUSED(bias); }
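
    /* Illustrative call (assumption: "col_bias" holds one int32_t per output
     * column of the quantized GEMM):
     *
     *   qgemm->set_quantized_bias(col_bias.data());
     */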

    // Destructor
    virtual ~IGemmCommon() { }
};
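
/* Combined usage sketch (illustrative only - how "gemm" is obtained, e.g.
 * from a factory, is outside the scope of this header): a caller that
 * supports all three implementation styles can drive the interface
 * uniformly.
 *
 *   gemm->set_arrays_generic(A, lda, A_batch_stride, A_multi_stride,
 *                            B, ldb, B_multi_stride,
 *                            C, ldc, C_batch_stride, C_multi_stride);
 *
 *   // "Pretransposed" implementations need B permuted up-front (see the
 *   // pretranspose sketch above)...
 *   if (gemm->B_pretranspose_required()) { ... }
 *
 *   // ...while implementations that need working space permute as they go
 *   // (see the working space sketch above).
 *   if (gemm->get_working_size() > 0) { ... }
 *
 *   // "Native" implementations need neither; execute() works in all cases.
 *   gemm->execute(0, gemm->get_window_size(), 0);
 */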
Pablo Telloeb82fd22018-02-23 13:43:50 +0000105
Georgios Pinitas1d480652019-01-23 11:24:50 +0000106/*
107 * "Real" GemmCommon class which is templated on the operand and return types.
108 *
109 * In addition to correctly typed versions of the functions that operate on
110 * operand and return data, this class provides a default implementation of
111 * 'set_arrays' to capture the provided arguments in protected class
112 * members, as essentially any implementation will need these.
113 */
114template<typename To, typename Tr>
115class GemmCommon : public IGemmCommon {
116protected:
117 const To *_Aptr=nullptr;
118 int _lda=0;
119 int _A_batch_stride=0;
120 int _A_multi_stride=0;
121 const To *_Bptr=nullptr;
122 int _ldb=0;
123 int _B_multi_stride=0;
124 Tr *_Cptr=nullptr;
125 int _ldc=0;
126 int _C_batch_stride=0;
127 int _C_multi_stride=0;
128
129public:
130 /* Pass in the pointers to the arrays to be operated on and their
131 * strides (templated version with appropriate types). */
132 virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
133 const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
Georgios Pinitascfa2bba2019-06-27 17:00:52 +0100134 Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
Georgios Pinitas1d480652019-01-23 11:24:50 +0000135 _Aptr = A;
136 _lda = lda;
137 _A_batch_stride = A_batch_stride;
138 _A_multi_stride = A_multi_stride;
139 _Bptr = B;
140 _ldb = ldb;
141 _B_multi_stride = B_multi_stride;
142 _Cptr = C;
143 _ldc = ldc;
144 _C_batch_stride = C_batch_stride;
145 _C_multi_stride = C_multi_stride;
146 }
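
    /* Illustrative example (the dense packing is an assumption, not part of
     * this interface): with an M x K matrix A, a K x N matrix B (shared
     * across batches) and an M x N matrix C, packed contiguously over
     * "nbatches" batches, the strides would typically be:
     *
     *   gemm.set_arrays(A, K, M * K, nbatches * M * K,  // lda, A batch/multi strides
     *                   B, N, K * N,                    // ldb, B multi stride
     *                   C, N, M * N, nbatches * M * N); // ldc, C batch/multi strides
     */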

    /* Implementation of the void * overload which casts its arguments to the appropriate type. */
    void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                            const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
        set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
                   static_cast<const To *>(B), ldb, B_multi_stride,
                   static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
    }

    /*** "Pretransposed" interface ***/

    /* Perform pretranspose - the void * passed in must remain allocated for the duration of any execute calls. */
    /* Arguments are: output buffer pointer, source pointer, source row stride, source multi stride */
    virtual void pretranspose_B_array(void *, const To *, const int, const int) { }

    /* Implementation of the void * overload which casts its arguments to the appropriate type. */
    void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override {
        pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
    }

};
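
/* Minimal sketch of a concrete implementation (hypothetical, for
 * illustration only): a naive single-threaded FP32 GEMM built on the
 * pointers and strides captured by GemmCommon. Real implementations also
 * handle batches, multis, blocking and vectorization.
 *
 *   class NaiveGemm : public GemmCommon<float, float> {
 *   public:
 *       NaiveGemm(unsigned int M, unsigned int N, unsigned int K)
 *           : _M(M), _N(N), _K(K) { }
 *
 *       // One unit of work: the whole operation.
 *       unsigned int get_window_size() const override { return 1; }
 *
 *       void execute(unsigned int start, unsigned int end, int) override {
 *           if (start >= end) return;
 *           for (unsigned int i = 0; i < _M; i++) {
 *               for (unsigned int j = 0; j < _N; j++) {
 *                   float acc = 0.0f;
 *                   for (unsigned int k = 0; k < _K; k++) {
 *                       acc += _Aptr[i * _lda + k] * _Bptr[k * _ldb + j];
 *                   }
 *                   _Cptr[i * _ldc + j] = acc;
 *               }
 *           }
 *       }
 *
 *   private:
 *       unsigned int _M, _N, _K;
 *   };
 */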

} // namespace arm_gemm