Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 1 | /* |
Georgios Pinitas | 5aa1a0b | 2020-07-02 20:02:20 +0100 | [diff] [blame] | 2 | * Copyright (c) 2017-2020 Arm Limited. |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
| 24 | #pragma once |
| 25 | |
| 26 | #include <stdio.h> |
| 27 | |
| 28 | #include "arm_gemm.hpp" |
Georgios Pinitas | 48b3ef8 | 2019-10-14 19:03:09 +0100 | [diff] [blame] | 29 | #include "bias_adder.hpp" |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 30 | #include "mergeresults.hpp" |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 31 | #include "transform.hpp" |
| 32 | |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame] | 33 | #ifdef CYCLE_PROFILING |
| 34 | #include "profiler.hpp" |
| 35 | #endif |
| 36 | |
Anthony Barbier | 5f70773 | 2018-07-03 16:22:02 +0100 | [diff] [blame] | 37 | namespace arm_gemm { |
| 38 | |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 39 | // Implementation of the GemmCommon abstract class. |
| 40 | // |
// This implementation is for GEMV with pretransposition.
Anthony Barbier | 5f70773 | 2018-07-03 16:22:02 +0100 | [diff] [blame] | 42 | // |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame] | 43 | // batches are not supported as a batched GEMV makes no sense (can be converted to a GEMM). |
template<typename strategy, typename To, typename Tr>
class GemvPretransposed : public GemmCommon<To, Tr> {
    // Internal operand/result types as chosen by the strategy's kernel.
    typedef typename strategy::operand_type Toi;
    typedef typename strategy::result_type Tri;

    const unsigned int _Nsize;    // length of the output vector (N)
    const unsigned int _Ksize;    // length of the input vector / depth (K)

    const unsigned int _nmultis;  // number of independent "multi" problems handled in one call

    const Activation _act;        // activation applied to the output after the kernel runs

    const CPUInfo * const _ci;    // CPU information, forwarded to the strategy constructor

    // Number of elements of pretransposed A storage needed per multi
    // (K rows, with N rounded up to a whole number of A_interleave panels).
    const unsigned int _buffer_per_multi;

    // Blocking sizes over K (m_block) and N (n_block); set in the constructor.
    unsigned int m_block=0;
    unsigned int n_block=0;

    // Pretransposed operand buffer; nullptr until pretranspose_B_array() or
    // set_pretransposed_B_data() is called.
    const Toi *_A_pretransposed = nullptr;

public:
    // Non-copyable: the class holds a raw pointer into caller-owned buffer storage.
    GemvPretransposed(GemvPretransposed &) = delete;
    GemvPretransposed & operator= (GemvPretransposed &) = delete;

    GemvPretransposed(const GemmArgs &args)
                      : _Nsize(args._Nsize), _Ksize(args._Ksize), _nmultis(args._nmulti), _act(args._act), _ci(args._ci),
                        _buffer_per_multi(_Ksize * iceildiv(_Nsize, strategy::A_interleave()) * strategy::A_interleave()) {
        /* For now don't do any blocking. TODO: figure out if we should. */
        // Inner (K) block size: taken from the config if provided, otherwise
        // the whole K dimension is processed in one pass.
        if (args._cfg && args._cfg->inner_block_size) {
            m_block = args._cfg->inner_block_size;
        } else {
            m_block = _Ksize;
        }

        // Outer (N) block size: likewise config-driven, default whole N.
        if (args._cfg && args._cfg->outer_block_size) {
            n_block = args._cfg->outer_block_size;
        } else {
            n_block = _Nsize;
        }
    }

    // Window is number of out_width blocks, times number of multis.
    ndrange_t get_window_size() const override {
        return { iceildiv(_Nsize, strategy::out_width()) * _nmultis };
    }

    // Actually execute the GEMV.
    // work_range selects a [start, end) slice of the 1-D window returned by
    // get_window_size(); each window unit is one out_width-wide column block
    // of one multi.
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif
        strategy strat(_ci);

        const auto start = work_range.get_position(0);
        const auto end   = work_range.get_position_end(0);

        /* Break the window values down into multis of interest... */
        const unsigned int window_per_multi = iceildiv(_Nsize, strategy::out_width());
        const unsigned int multi_0          = start / window_per_multi;
        const unsigned int multi_end        = end   / window_per_multi;

        /* ... and figure out where we start and end in the first and last multi. */
        const unsigned int n_0   = (start - (multi_0 * window_per_multi)) * strategy::out_width();
        const unsigned int n_max = (end - (multi_end * window_per_multi)) * strategy::out_width();

        static_assert(std::is_same<Tr, Tri>::value, "GemvPretransposed: Result types must be the same.");

        for (unsigned int multi=multi_0; multi<=multi_end; multi++) {
            // Clamp the N range for the first and last multi in the slice;
            // interior multis cover the full [0, _Nsize) range.
            const unsigned int n_start = (multi==multi_0) ? n_0 : 0;
            const unsigned int n_end = (multi==multi_end) ? n_max : _Nsize;

            if (n_end <= n_start)
                continue;

            // Iterate K in m_block chunks, then N in n_block chunks.
            for (unsigned int m0=0; m0<_Ksize; m0+=m_block) {
                unsigned int mmax = std::min(m0 + m_block, _Ksize);

                for (unsigned int n=n_start; n<n_end; n+=n_block) {
                    unsigned int nmax = std::min(n + n_block, n_end);
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (mmax-m0) * (nmax-n));
#endif
                    /* This assumes that the underlying call was a GEMM with M=1; for the N=1 case we would have to pick up this->_Bptr below instead */
                    strat.kernel(_A_pretransposed + (multi * _buffer_per_multi) + (n * _Ksize) + (m0 * strategy::A_interleave()),
                                 (_Ksize * strategy::A_interleave()),
                                 this->_Aptr + (multi * this->_A_multi_stride) + m0,
                                 this->_Cptr + (multi * this->_C_multi_stride) + n,
                                 static_cast<Tr>(0), (mmax-m0), (nmax-n));

                    // Handle activation separately for now
                    // (bias add, when present, is fused into the activator call).
                    if (this->_bias) {
                        activator<true>(this->_Cptr + (multi * this->_C_multi_stride) + n, 0,
                                        this->_bias + (multi * this->_bias_multi_stride) + n,
                                        _act, 1, (nmax-n));
                    } else {
                        activator<false>(this->_Cptr + (multi * this->_C_multi_stride) + n, 0,
                                        static_cast<const Tr *>(nullptr),
                                        _act, 1, (nmax-n));
                    }
                }
            }
        }
    }

    /* Pretransposed interface implementation */
    bool B_is_pretransposed() const override {
        return true;
    }

    bool B_pretranspose_required() const override {
        /* Transpose is required if _A_pretransposed is still nullptr */
        return (_A_pretransposed == nullptr);
    }

    // Total bytes of pretransposed storage needed across all multis.
    size_t get_B_pretransposed_array_size() const override {
        return _buffer_per_multi * _nmultis * sizeof(To);
    }

    // Pack/transpose B into 'buffer' (caller-allocated, sized per
    // get_B_pretransposed_array_size()) and remember it for execute().
    void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override {
        Toi *A_buffer = reinterpret_cast<Toi *>(buffer);

        for (unsigned int multi=0; multi<_nmultis; multi++) {
            /* Sense is reversed here because we are packing the B (right hand
             * side) operand using the strategy's "A" transforms: when
             * strategy::A_transpose() is true the non-transposing Transform is
             * used, and vice versa.
             * NOTE(review): assumes the third Transform template parameter is
             * the transpose flag — confirm against transform.hpp. */
            if (strategy::A_transpose()) {
                Transform<strategy::A_interleave(), strategy::A_block(), false>(A_buffer + (multi * _buffer_per_multi), B + (multi * B_multi_stride), ldb, 0, _Nsize, 0, _Ksize);
            } else {
                Transform<strategy::A_interleave(), strategy::A_block(), true>(A_buffer + (multi * _buffer_per_multi), B + (multi * B_multi_stride), ldb, 0, _Nsize, 0, _Ksize);
            }
        }

        _A_pretransposed = A_buffer;
    }

    // Adopt an already-pretransposed buffer (e.g. shared/cached weights)
    // without repacking.
    void set_pretransposed_B_data(void *buffer) override {
        _A_pretransposed = reinterpret_cast<Toi *>(buffer);
    }
};
| 184 | |
| 185 | } // namespace arm_gemm |