/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include "arm_gemm_compute_iface.hpp"

#include <cassert>
#include <cstddef>
#include <cstdint>

#define UNUSED(x) (void)(x)

namespace arm_gemm {

// Abstract class for the GEMM/GEMV functions.
//
// GEMM implementations may be "native" (never require any input
// permutation), "pretransposed" (require permutation up-front) or require
// working space (permute as they go along). This interface should support
// all of them.

// The real GemmCommon class is templated based on the operand and return
// type. This is an interface class which is independent of those types.
class IGemmCommon {
public:
    /* Pass in the pointers to the arrays to be operated on and their
     * strides. This "generic" version uses void *s; the preferred version
     * is the one provided by the templated GemmCommon (below), which takes
     * appropriately typed pointers. If B is pretransposed (see below) then
     * the settings for B here are ignored.
     */
    virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                                    const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
                                    const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) = 0;

    /** @returns an ndrange containing ranges of the compute space which can be
     * broken up and parallelised over
     */
    virtual ndrange_t get_window_size() const = 0;

    /* The maximum thread count is specified when the GEMM is created. Some
     * implementations need to know how many threads will actually run in
     * order to work properly.
     *
     * In some cases, after creating the GEMM the number of threads needs to
     * be reduced (e.g. not enough work to split across threads). This
     * method allows the actual number of threads that will run to be set
     * (it must be equal to or lower than the maximum).
     *
     * This has an empty default implementation, as GEMMs which don't care
     * about thread count can safely ignore this.
     */
    virtual void set_nthreads(int) { }

    /* Whether this GEMM can be dynamically scheduled or not. */
    virtual bool supports_dynamic_scheduling() const { return false; }

    /** Main execute member function.
     * @param [in] work_range     specifies the range of work to be computed; the total range is defined by get_window_size()
     * @param [in] thread_locator where we are inside the thread space
     * @param [in] threadid       a unique thread ID
     */
    virtual void execute(const ndcoord_t& work_range, const ndcoord_t& thread_locator, int threadid) = 0;

    /*** Working space interface (optional) ***/
    /* Total number of bytes of temporary working space needed. If zero, it's not necessary to call set_working_space(). */
    virtual size_t get_working_size() const { return 0; }
    /* Provide working space buffer - the void * passed in must remain allocated for the duration of any execute calls. */
    virtual void set_working_space(void *) { }
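
    /* A minimal caller-side sketch of the working-space protocol (illustrative
     * only; the buffer name and allocation strategy are assumptions, not part
     * of this interface):
     *
     *   std::vector<uint8_t> working_space;
     *   if (gemm->get_working_size() > 0) {
     *       working_space.resize(gemm->get_working_size());
     *       gemm->set_working_space(working_space.data());
     *   }
     *
     * The buffer must stay allocated for as long as execute() may be called. */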

    /*** "Pretransposed" interface (optional) ***/
    /* Is this object set up for pretranspose? If so, pretranspose_B_array() needs to be called before execute(). */
    virtual bool B_is_pretransposed() const { return false; }
    /* Does pretranspose still need to be done? */
    virtual bool B_pretranspose_required() const { return false; }
    /* Total number of bytes of space needed for pretransposed arrays. */
    virtual size_t get_B_pretransposed_array_size() const { return 0; }
    /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */
    /* The "real" version of this depends on the templated operand type (see below). */
    virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0;
    /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
    virtual void set_pretransposed_B_data(void *) { }
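
    /* A minimal caller-side sketch of the pretranspose flow (illustrative
     * only; the buffer management shown is an assumption, not part of this
     * interface):
     *
     *   void *B_transposed = nullptr;
     *   if (gemm->B_is_pretransposed() && gemm->B_pretranspose_required()) {
     *       B_transposed = malloc(gemm->get_B_pretransposed_array_size());
     *       gemm->pretranspose_B_array_generic(B_transposed, B, ldb, B_multi_stride);
     *   }
     *
     * The pretransposed buffer must remain allocated while execute() runs;
     * set_pretransposed_B_data() can later re-attach a buffer produced this
     * way to the same or a similar GEMM. */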

    /*** "Quantized bias" interface (optional) ***/
    /* Set the bias vector for quantized GEMMs */
    virtual void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride)
    {
        UNUSED(bias);
        UNUSED(bias_multi_stride);
    }

    // Destructor
    virtual ~IGemmCommon() { }
};

/* "Real" GemmCommon class which is templated on the operand and return types.
 *
 * In addition to correctly typed versions of the functions that operate on
 * operand and return data, this class provides a default implementation of
 * 'set_arrays' to capture the provided arguments in protected class
 * members, as essentially any implementation will need these.
 */
template<typename To, typename Tr>
class GemmCommon : public IGemmCommon {
protected:
    const To *_Aptr=nullptr;
    int _lda=0;
    int _A_batch_stride=0;
    int _A_multi_stride=0;
    const To *_Bptr=nullptr;
    int _ldb=0;
    int _B_multi_stride=0;
    Tr *_Cptr=nullptr;
    int _ldc=0;
    int _C_batch_stride=0;
    int _C_multi_stride=0;
    const Tr *_bias=nullptr;
    int _bias_multi_stride=0;

public:
    /* Pass in the pointers to the arrays to be operated on and their
     * strides (templated version with appropriate types). */
    virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                            const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
                            const Tr *bias, /* no row or batch stride needed */ const int bias_multi_stride) {
        _Aptr = A;
        _lda = lda;
        _A_batch_stride = A_batch_stride;
        _A_multi_stride = A_multi_stride;
        _Bptr = B;
        _ldb = ldb;
        _B_multi_stride = B_multi_stride;
        _Cptr = C;
        _ldc = ldc;
        _C_batch_stride = C_batch_stride;
        _C_multi_stride = C_multi_stride;
        _bias = bias;
        _bias_multi_stride = bias_multi_stride;
    }
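
    /* Illustrative call for a single, un-batched, un-biased GEMM (a sketch;
     * the densely packed row-major strides shown are an assumption for the
     * example and are not mandated by this interface):
     *
     *   // C[M][N] = A[M][K] * B[K][N], one batch, one multi
     *   gemm.set_arrays(A, K, 0, 0,   // A pointer, lda, A_batch_stride, A_multi_stride
     *                   B, N, 0,      // B pointer, ldb, B_multi_stride
     *                   C, N, 0, 0,   // C pointer, ldc, C_batch_stride, C_multi_stride
     *                   nullptr, 0);  // no bias
     */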

    /* Implementation of the void * overload which casts its arguments to the appropriate type. */
    void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                            const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
                            const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) override {
        set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
                   static_cast<const To *>(B), ldb, B_multi_stride,
                   static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride,
                   static_cast<const Tr *>(bias), bias_multi_stride);
    }

    /*** "Pretransposed" interface ***/

    /* Perform pretranspose - the void * passed in must remain allocated for the duration of any execute calls. */
    /* Arguments are: output buffer pointer, source pointer, source row stride, source multi stride */
    virtual void pretranspose_B_array(void *, const To *, const int, const int) { }

    /* Implementation of the void * overload which casts its arguments to the appropriate type. */
    void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override {
        pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
    }
};

template<typename GemmKernel>
inline
unsigned int get_total_window_size(const GemmKernel& kernel)
{
    auto window = kernel.get_window_size();

    unsigned int total = 1;
    for (unsigned i = 0; i != arm_gemm::ndrange_max; ++i)
    {
        total *= window.get_size(i);
    }

    return total;
}
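
/* A sketch of how a caller might cap the thread count from the flattened
 * window size (illustrative only; 'max_threads' is a hypothetical caller
 * parameter, not part of this interface):
 *
 *   unsigned int total   = get_total_window_size(*gemm);
 *   int          threads = std::min<int>(max_threads, static_cast<int>(total));
 *   gemm->set_nthreads(threads);
 *
 * Splitting the [0, total) range into per-thread ndcoord_t work ranges for
 * execute() is done by the caller's scheduler and is not shown here. */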

} // namespace arm_gemm