/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <assert.h>
#include <stdio.h>

#include <algorithm>

#include "arm_gemm.hpp"
#include "utils.hpp"

#include "buffer_manager.hpp"
#include "mergeresults.hpp"
#include "transform.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

// Some macros used to decide how much working space to allocate.
// Round allocations up to the next cache line.
#define ALLOC_ROUND 64
#define ROUND_UP(x) ((((x) + ALLOC_ROUND - 1) / ALLOC_ROUND) * ALLOC_ROUND)
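// For example, ROUND_UP(100) == 128 and ROUND_UP(128) == 128, so every
// rounded allocation below starts on its own 64-byte cache line.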

// Implementation of the GemmCommon abstract class.
//
// This implementation interleaves the source matrices in blocks - good for
// larger matrices.
namespace arm_gemm
{
template <typename strategy, typename To, typename Tr>
class GemmInterleaved : public GemmCommon<To, Tr>
{
    typedef typename strategy::operand_type Toi;
    typedef typename strategy::result_type Tri;

    /* const properties set by constructor */
    const CPUInfo *const _ci;

    const unsigned int _Msize;
    const unsigned int _Nsize;
    const unsigned int _Ksize;

    const unsigned int _nbatches;
    const unsigned int _nmulti;

    const bool _trA;
    const bool _trB;

    const Tr _alpha;
    const Tr _beta;

    const unsigned int _maxthreads;
    const bool _pretransposed;

    /* Blocking info */
    unsigned int _k_block = 0;
    unsigned int _x_block = 0;
    unsigned int _Mround = 0;

    /* Working space, pretransposed buffer, buffer manager */
    const Toi *_B_transposed = nullptr;
    BufferManager *_bm = nullptr;
    void *_working_space = nullptr;

    /* We will need to walk through the blocks of B in a few contexts, so
     * factor that out. */
    class blockwalker
    {
    private:
        /* Size loops, etc. based on our parent's configuration */
        const GemmInterleaved<strategy, To, Tr> &_parent;

        /* K, X and multi parameters for the current iteration. */
        unsigned int _k0 = 0, _x0 = 0, _multi = 0;

        unsigned int _index = 0;
        bool _done = false;
        bool _newkblock = true;
        bool _newmulti = true;

    public:
        blockwalker(const GemmInterleaved<strategy, To, Tr> &parent)
            : _parent(parent)
        {
        }

        unsigned int xmax()
        {
            return std::min(_x0 + _parent._x_block, _parent._Nsize);
        }

        unsigned int kmax()
        {
            return std::min(_k0 + _parent._k_block, _parent._Ksize);
        }

        /* Advance to the next block, return false at the end. */
        bool advance(void)
        {
            if(_done)
            {
                return false;
            }

            _newkblock = false;
            _x0 += _parent._x_block;
            if(_x0 >= _parent._Nsize)
            {
                _x0 = 0;
                _k0 += _parent._k_block;
                if(_k0 >= _parent._Ksize)
                {
                    _k0 = 0;
                    _multi++;
                    if(_multi >= _parent._nmulti)
                    {
                        _done = true;
                        return false;
                    }
                    _newmulti = true;
                }
                _newkblock = true;
            }
            _index++;

            return true;
        }

        unsigned int k0(void)
        {
            return _k0;
        }
        unsigned int x0(void)
        {
            return _x0;
        }
        unsigned int multi(void)
        {
            return _multi;
        }
        unsigned int index(void)
        {
            return _index;
        }
        bool done(void)
        {
            return _done;
        }
        bool newkblock(void)
        {
            return _newkblock;
        }
    };
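
    /* Iteration order, illustratively: with two X blocks and two K blocks,
     * advance() visits (k0, x0) = (0, 0) -> (0, xb) -> (kb, 0) -> (kb, xb)
     * for each multi in turn - X moves fastest, then K, then multi - and
     * newkblock() is true on the first block and whenever K or multi has
     * just changed. */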

    // A working size: One of these needed, regardless of thread count. Divided according to window.
    size_t get_a_working_size() const
    {
        return ROUND_UP(sizeof(Toi) * _k_block * _Mround * _nbatches);
    }

    // B working size: 0, 1 or 3 of these needed depending on pretransposed and threading settings.
    size_t get_b_working_size() const
    {
        return ROUND_UP(sizeof(Toi) * _x_block * _k_block);
    }

    // C working size: One needed per thread.
    size_t get_c_working_size() const
    {
        return ROUND_UP(sizeof(Tri) * _x_block * strategy::out_height);
    }
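
    // Overall working-space layout (see set_working_space below): one C
    // buffer per thread first, then the single window-divided A buffer; B
    // buffers, when needed, live in the BufferManager's storage instead.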

    // Internal execute function.
    // This supports both the "pretransposed" and "standard" interfaces via the template parameter.
    template <bool pretransposed>
    void execute_internal(unsigned int start, unsigned int end, int threadid)
    {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif

        strategy strat(_ci);

        blockwalker current(*this);
        blockwalker next = current;

        /* Translate 'start' and 'end' into a position within the batches and rows. */
        const unsigned int window_per_batch = _Mround / strategy::out_height;
        unsigned int batch_0 = start / window_per_batch;
        unsigned int batch_end = end / window_per_batch;

        /* Compute the M values to operate on */
        unsigned int m_0 = (start - (batch_0 * window_per_batch)) * strategy::out_height;
        unsigned int m_max = (end - (batch_end * window_per_batch)) * strategy::out_height;
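
        /* Worked example (illustrative numbers): with _Mround == 104 and
         * out_height == 8, window_per_batch == 13; a thread given start == 15
         * lands in batch_0 == 1 with m_0 == (15 - 13) * 8 == 16, i.e. it
         * starts 16 rows into the second batch. */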
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 218 | |
| 219 | /* Make sure we've been set up correctly. */ |
| 220 | if(pretransposed) |
| 221 | { |
| 222 | assert(_B_transposed); |
| 223 | } |
| 224 | else |
| 225 | { |
| 226 | assert(_bm); |
| 227 | } |
| 228 | |
| 229 | assert(_working_space); |
| 230 | int8_t *working_space_bytes = reinterpret_cast<int8_t *>(_working_space); |
| 231 | |
| 232 | // Private buffers. Treat working_space as an array of C buffers (one per thread) first, followed by the (window-divided) A buffer. |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame^] | 233 | // Set a_panel to the base of the A buffers - compute offsets into it based on M/batches later. |
| 234 | Toi *const a_panel = reinterpret_cast<Toi *>(working_space_bytes + (_maxthreads * get_c_working_size())); |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 235 | Tri *const c_panel = reinterpret_cast<Tri *>(working_space_bytes + (threadid * get_c_working_size())); |
| 236 | |
| 237 | // Shared buffers - these come either from BufferManager or _B_transposed. |
| 238 | const Toi *b_panel; |
| 239 | |
| 240 | if(pretransposed) |
| 241 | { |
| 242 | b_panel = _B_transposed; |
| 243 | } |
| 244 | |
| 245 | //printf("Starting GEMM loop, x_block=%d, k_block=%d\n", _x_block, _k_block); |
| 246 | |
| 247 | // newkblock() is always true on the first iteration, so this will be set properly on the first loop. |
| 248 | int kern_k = 0; |
| 249 | |
| 250 | for(; !current.done(); current.advance()) |
| 251 | { |
| 252 | if(current.newkblock()) |
| 253 | { |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame^] | 254 | #ifdef CYCLE_PROFILING |
| 255 | auto p = prof.ScopedProfiler(PROFILE_PREPA, (end - start) * strategy::out_height * (current.kmax() - current.k0()) * sizeof(Toi)); |
| 256 | #endif |
| 257 | for(unsigned int batch = batch_0; batch <= batch_end; batch++) |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 258 | { |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame^] | 259 | unsigned int first_m = (batch == batch_0) ? m_0 : 0; |
| 260 | unsigned int last_m = (batch == batch_end) ? m_max : _Msize; |
| 261 | |
| 262 | if(first_m >= last_m) |
| 263 | continue; |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 264 | if(_trA ^ strategy::A_transpose) |
| 265 | { |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame^] | 266 | Transform<strategy::A_interleave, strategy::A_block, true>( |
| 267 | a_panel + ((batch * _Mround + first_m) * _k_block), |
| 268 | this->_Aptr + (batch * this->_A_batch_stride) + (current.multi() * this->_A_multi_stride), |
| 269 | this->_lda, first_m, last_m, current.k0(), current.kmax()); |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 270 | } |
| 271 | else |
| 272 | { |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame^] | 273 | Transform<strategy::A_interleave, strategy::A_block, false>( |
| 274 | a_panel + ((batch * _Mround + first_m) * _k_block), |
| 275 | this->_Aptr + (batch * this->_A_batch_stride) + (current.multi() * this->_A_multi_stride), |
| 276 | this->_lda, first_m, last_m, current.k0(), current.kmax()); |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 277 | } |
Michalis Spyrou | e7e96e0 | 2018-04-13 13:44:10 +0100 | [diff] [blame^] | 278 | } |
Pablo Tello | eb82fd2 | 2018-02-23 13:43:50 +0000 | [diff] [blame] | 279 | |
| 280 | // Figure out how many "K" the kernel will actually process. |
| 281 | kern_k = iceildiv(current.kmax() - current.k0(), strategy::k_unroll); |
| 282 | kern_k *= strat.k_unroll; |
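                // e.g. with k_unroll == 4, a ragged 102-wide final K block
                // gives kern_k == 104.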
            }

            int bblocks = iceildiv(current.xmax() - current.x0(), strategy::out_width);

            if(!pretransposed)
            {
                /* Look ahead to the next block and populate it if necessary.
                 * This avoids the populate operation becoming a bottleneck, and
                 * helps keep the threads synchronized (the first thread to get
                 * here will populate while the rest will advance).
                 *
                 * If we are running single threaded, bm->try_populate() will do
                 * nothing.
                 */
                if(next.advance())
                {
                    _bm->try_populate(next.index(), [&](void *buffer)
                    {
#ifdef CYCLE_PROFILING
                        auto p = prof.ScopedProfiler(PROFILE_PREPB, (next.xmax() - next.x0()) * (next.kmax() - next.k0()) * sizeof(Toi));
#endif

                        Toi *b_panel = reinterpret_cast<Toi *>(buffer);
                        if(_trB ^ strategy::B_transpose)
                        {
                            Transform<strategy::B_interleave, strategy::B_block, true>(
                                b_panel, this->_Bptr + (next.multi() * this->_B_multi_stride), this->_ldb,
                                next.x0(), next.xmax(), next.k0(), next.kmax());
                        }
                        else
                        {
                            Transform<strategy::B_interleave, strategy::B_block, false>(
                                b_panel, this->_Bptr + (next.multi() * this->_B_multi_stride), this->_ldb,
                                next.x0(), next.xmax(), next.k0(), next.kmax());
                        }
                    });
                }
                /* Get the buffer for this iteration from the BufferManager. */
                b_panel = reinterpret_cast<Toi *>(_bm->get(current.index(), [&](void *bpv)
                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_PREPB, (current.xmax() - current.x0()) * (current.kmax() - current.k0()) * sizeof(Toi));
#endif

                    Toi *b_panel = reinterpret_cast<Toi *>(bpv);
                    if(_trB ^ strategy::B_transpose)
                    {
                        Transform<strategy::B_interleave, strategy::B_block, true>(
                            b_panel, this->_Bptr + (current.multi() * this->_B_multi_stride), this->_ldb,
                            current.x0(), current.xmax(), current.k0(), current.kmax());
                    }
                    else
                    {
                        Transform<strategy::B_interleave, strategy::B_block, false>(
                            b_panel, this->_Bptr + (current.multi() * this->_B_multi_stride), this->_ldb,
                            current.x0(), current.xmax(), current.k0(), current.kmax());
                    }
                }));
            }

            /* Do the actual work. */
            for(unsigned int batch = batch_0; batch <= batch_end; batch++)
            {
                unsigned int first_m = (batch == batch_0) ? m_0 : 0;
                unsigned int last_m = (batch == batch_end) ? m_max : _Msize;

                const Toi *a_ptr = a_panel + (batch * _Mround + first_m) * _k_block;

                if(first_m >= last_m)
                    continue;

                for(unsigned int y = first_m; y < last_m; y += strategy::out_height)
                {
                    unsigned int ymax = std::min(_Msize, y + strategy::out_height);

                    {
#ifdef CYCLE_PROFILING
                        auto p = prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height * bblocks * strategy::out_width * kern_k));
#endif

                        strat.kernel(a_ptr, b_panel, c_panel, 1, bblocks, kern_k);

                        a_ptr += (strategy::out_height * kern_k);
                    }

                    {
#ifdef CYCLE_PROFILING
                        auto p = prof.ScopedProfiler(PROFILE_MERGE, (strategy::out_height * bblocks * strategy::out_width * sizeof(Tr)));
#endif
                        MergeResults<strategy::out_width, strategy::out_height>(
                            this->_Cptr + (batch * this->_C_batch_stride) + (current.multi() * this->_C_multi_stride),
                            c_panel, this->_ldc, y, ymax, current.x0(), current.xmax(),
                            _alpha, (current.k0() == 0 ? _beta : static_cast<Tr>(1)));
                    }
                }
            }

            if(pretransposed)
            {
                b_panel += (bblocks * strat.out_width * kern_k);
            }
            else
            {
                _bm->release(current.index());
            }
        }
    }

public:
    GemmInterleaved(GemmInterleaved &) = delete;
    GemmInterleaved &operator=(GemmInterleaved &) = delete;

    /* Constructor */
    GemmInterleaved(const CPUInfo *ci, const unsigned int M, const unsigned int N, const unsigned int K,
                    const unsigned int nbatches, const unsigned int nmulti, const bool trA, const bool trB,
                    const Tr alpha, const Tr beta, const int maxthreads, const bool pretransposed)
        : _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti), _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads), _pretransposed(pretransposed)
    {
        const unsigned int L1_size = ci->get_L1_cache_size();
        const unsigned int L2_size = ci->get_L2_cache_size();

        assert(maxthreads > 0);

        // Work out blocking parameters

        // k_block: Find out how much of the larger array can be loaded into half the cache.
        // This should account for associative caches.
        _k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width, strategy::out_height)));

        // Needs to be (at least a single) multiple of the K unroll level.
        _k_block /= strategy::k_unroll;
        _k_block = std::max(_k_block, 1U) * strategy::k_unroll;

        // Now tune to presented problem size; this is how many blocks we need.
        int num_k_blocks = iceildiv(K, _k_block);

        // So divide the space equally into that many blocks.
        _k_block = iceildiv(K, num_k_blocks);

        // And round UP to the K unroll level required.
        _k_block = iceildiv(_k_block, strategy::k_unroll);
        _k_block *= strategy::k_unroll;
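
        // Worked example (illustrative numbers): a 32KB L1, sizeof(Toi) == 4,
        // a 12x8 kernel and k_unroll == 4 give (32768 / 2) / (4 * 12) == 341,
        // rounded down to 340; K == 1000 then needs 3 blocks, so _k_block is
        // retuned to iceildiv(1000, 3) == 334 and rounded up to 336.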

        // x_block: Work out how many rows (of length k_block) will fit in the L2
        // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
        _x_block = (((L2_size * 9) / 10) - (_k_block * sizeof(Toi) * (strategy::out_width + strategy::out_height))) / (sizeof(Toi) * _k_block);

        // Needs to be (at least a single) multiple of the kernel output width.
        _x_block /= strategy::out_width;
        _x_block = std::max(_x_block, 1U) * strategy::out_width;

        // And tune to the presented problem size.
        int num_x_blocks = iceildiv(N, _x_block);
        _x_block = iceildiv(N, num_x_blocks);

        _x_block = iceildiv(_x_block, strategy::out_width);
        _x_block *= strategy::out_width;
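
        // Continuing the example: a 512KB L2 gives ((524288 * 9) / 10 -
        // 336 * 4 * 20) / (4 * 336) == 331, rounded down to 324; N == 1000
        // then needs 4 blocks, so _x_block is retuned to 252 (21 * 12).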

        // Work out the rounded size of M - needed for some buffers.
        _Mround = iceildiv(M, strategy::out_height);
        _Mround *= strategy::out_height;
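        // e.g. M == 100 with out_height == 8 rounds up to _Mround == 104.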
    }

    // Interface implementation - Compulsory functions

    // Window size: Only the last thread should do a ragged block, so dole
    // out work in units of out_height. Factor batches into the window, but
    // not multi for now (as this would cause problems with the buffer
    // manager).

    unsigned int get_window_size() const override
    {
        // _Mround is a multiple of out_height by definition.
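        // e.g. _Mround == 104 (13 rows of out_height == 8) and _nbatches == 2
        // give a window of 26 units of work to share between threads.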
        return (_Mround / strategy::out_height) * _nbatches;
    }

    // set_nthreads: pass on to buffer manager to avoid it waiting for non-existent threads.
    void set_nthreads(int nthreads) override
    {
        if(_bm)
        {
            _bm->set_nthreads(nthreads);
        }
    }

    // Execute
    void execute(unsigned int start, unsigned int end, int threadid) override
    {
        if(_pretransposed)
        {
            execute_internal<true>(start, end, threadid);
        }
        else
        {
            execute_internal<false>(start, end, threadid);
        }
    }

    // Interface implementation - working space
    size_t get_working_size() const override
    {
        // In all cases, we need one A buffer plus a C buffer per thread.
        size_t size = get_a_working_size() + (get_c_working_size() * _maxthreads);

        // For pretransposed case, there is no working space needed for B.
        // Otherwise, we need a BufferManager.
        if(!_pretransposed)
        {
            size += BufferManager::get_storage_requirement(_maxthreads, get_b_working_size());
        }

        size += 64; // Add on a cache line extra for alignment.

        return size;
    }

    void set_working_space(void *working_space) override
    {
        // Make sure everything ends up cache line aligned
        int8_t *working_space_bytes = reinterpret_cast<int8_t *>(working_space);
        intptr_t working_space_int = reinterpret_cast<intptr_t>(working_space);

        size_t diff = 0;

        if(working_space_int & 0x3F)
        {
            diff = 0x40 - (working_space_int & 0x3F);
        }

        working_space_bytes += diff;
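        // e.g. a pointer ending in 0x28 gets diff == 0x18, moving it to the
        // next 64-byte (0x40) boundary.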

        if(_pretransposed)
        {
            // Pretransposed case: just set internal pointer to parameter value.
            _working_space = reinterpret_cast<void *>(working_space_bytes);
        }
        else
        {
            // Otherwise, use the first part of the working space for the buffer manager.
            // It's legal to call this again so don't leak a buffer manager if it already existed.
            delete _bm;

            _bm = new BufferManager(_maxthreads, get_b_working_size(), reinterpret_cast<void *>(working_space_bytes));

            working_space_bytes += BufferManager::get_storage_requirement(_maxthreads, get_b_working_size());

            _working_space = reinterpret_cast<void *>(working_space_bytes);
        }
    }

    // Interface implementation - pretransposed
    bool B_is_pretransposed() const override
    {
        return _pretransposed;
    }

    bool B_pretranspose_required() const override
    {
        return _pretransposed && (_B_transposed == nullptr);
    }

    // TODO: this could almost certainly be considerably simpler.
    size_t get_B_pretransposed_array_size() const override
    {
        size_t total = 0;
        blockwalker current(*this);

        do
        {
            /* Figure out the size of each block. */
            size_t x_size = (current.xmax() - current.x0());
            size_t k_size = (current.kmax() - current.k0());

            /* Round sizes up as needed. */
            x_size = iceildiv(x_size, strategy::out_width);
            x_size *= strategy::out_width;

            k_size = iceildiv(k_size, strategy::k_unroll);
            k_size *= strategy::k_unroll;

            total += x_size * k_size * sizeof(Toi);
        }
        while(current.advance());

        return total;
    }

    void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override
    {
        blockwalker current(*this);
        Toi *buffer = reinterpret_cast<Toi *>(in_buffer);
        _B_transposed = buffer;

        do
        {
            /* Figure out the size of each block. */
            size_t x_size = (current.xmax() - current.x0());
            size_t k_size = (current.kmax() - current.k0());

            /* Round sizes up as needed. */
            x_size = iceildiv(x_size, strategy::out_width);
            x_size *= strategy::out_width;

            k_size = iceildiv(k_size, strategy::k_unroll);
            k_size *= strategy::k_unroll;

            if(_trB ^ strategy::B_transpose)
            {
                Transform<strategy::B_interleave, strategy::B_block, true>(
                    buffer, B + (current.multi() * B_multi_stride), ldb,
                    current.x0(), current.xmax(), current.k0(), current.kmax());
            }
            else
            {
                Transform<strategy::B_interleave, strategy::B_block, false>(
                    buffer, B + (current.multi() * B_multi_stride), ldb,
                    current.x0(), current.xmax(), current.k0(), current.kmax());
            }

            buffer += (x_size * k_size);
        }
        while(current.advance());
    }

    void set_pretransposed_B_data(void *in_buffer) override
    {
        _B_transposed = reinterpret_cast<Toi *>(in_buffer);
    }

    ~GemmInterleaved() override
    {
        delete _bm;
    }
};
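
/* Minimal single-threaded driver sketch (illustrative only - real callers
 * live in the library's dispatch code, "strategy" stands for one of the
 * kernel strategy classes defined elsewhere, and set_arrays() is assumed
 * to be the pointer/stride setter declared on GemmCommon in arm_gemm.hpp):
 *
 *   GemmInterleaved<strategy, float, float> gemm(ci, M, N, K, nbatches,
 *                                                nmulti, trA, trB, alpha,
 *                                                beta, 1, true);
 *   gemm.set_arrays(A, lda, A_batch_stride, A_multi_stride,
 *                   B, ldb, B_multi_stride,
 *                   C, ldc, C_batch_stride, C_multi_stride);
 *   std::vector<int8_t> ws(gemm.get_working_size());
 *   gemm.set_working_space(ws.data());
 *   if(gemm.B_pretranspose_required())
 *   {
 *       std::vector<int8_t> bt(gemm.get_B_pretransposed_array_size());
 *       gemm.pretranspose_B_array(bt.data(), B, ldb, B_multi_stride);
 *   }
 *   gemm.execute(0, gemm.get_window_size(), 0);
 */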

} // namespace arm_gemm