/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h"

#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include "src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h"

#include <atomic>
#include <condition_variable>
#include <mutex>

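// Minimal usage sketch (illustrative only; assumes tensors a, b and c have been
// initialised and allocated elsewhere):
//
//   NEGEMMInterleavedWrapper gemm;
//   gemm.configure(&a, &b, &c, 1.f, 0.f, /* pretranspose_b = */ true);
//   gemm.run();
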
namespace arm_compute
{
#ifndef NO_MULTI_THREADING
class BufferManagerMultipleThreads final : public IBufferManager
{
public:
    /** Number of buffers to ping-pong between */
    static constexpr unsigned int NUM_BUFFERS = 3;
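    // With three buffers the pipeline can overlap: one block is consumed (IN_USE)
    // while the next is reshaped (BEING_RESHAPED) and a third slot cycles back to
    // FREE (see the State enum and get_buffer_from_index() below).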

    explicit BufferManagerMultipleThreads(unsigned int max_num_users)
        : _max_num_users(max_num_users)
    {
    }
    unsigned int num_buffers() const override
    {
        return NUM_BUFFERS;
    }
    /* - Lock the requested index if it's free and return true if it needs reshaping.
     * - Return false without acquiring the lock if the buffer at the index is already reshaped / being reshaped.
     * - Block if the corresponding buffer for the given index is still being used by a different index.
     */
    bool lock_to_reshape_if_needed(unsigned int index) override
    {
        Buffer &buf = get_buffer_from_index(index);
        while(true)
        {
            if(buf.index == index && buf.state != State::FREE)
            {
                // Another thread is already reshaping / has reshaped this block: nothing to do
                return false;
            }
            else
            {
                std::unique_lock<std::mutex> lock(buf.mutex);
                // If the buffer is free then lock it for reshaping:
                if(buf.state == State::FREE)
                {
                    buf.index = index;
                    buf.state = State::BEING_RESHAPED;
                    return true;
                }
                // Check again just in case it changed while we were acquiring the lock:
                if(buf.index == index)
                {
                    // Another thread is already reshaping this block, nothing to do
                    return false;
                }
                // buf.index != index: buffer still being used by another block, need to wait
                buf.sem.wait(lock);
            }
        }
    }
    /* Mark the buffer at the given index as reshaped and release the lock acquired via lock_to_reshape_if_needed() */
    void mark_as_reshaped(unsigned int index) override
    {
        Buffer &buf = get_buffer_from_index(index);
        {
            std::lock_guard<std::mutex> lock(buf.mutex);
            buf.users = _max_num_users;
            buf.state = State::IN_USE;
        }
        // Notify outside the critical section so that woken threads don't immediately block on the mutex:
        buf.sem.notify_all();
    }

    /* Block until the buffer at the given index is reshaped */
    void wait_for_reshaping(unsigned int index) override
    {
        Buffer &buf = get_buffer_from_index(index);
        ARM_COMPUTE_ERROR_ON(buf.index != index); // Should have blocked in lock_to_reshape_if_needed()
        // Check if it's already ready to use:
        if(buf.state == State::IN_USE)
            return;
        std::unique_lock<std::mutex> lock(buf.mutex);
        // Double-check it didn't change while we were acquiring the lock:
        if(buf.state == State::IN_USE)
            return;
        buf.sem.wait(lock);
    }
    /* Mark the buffer at the given index as not used by this thread anymore.
     * Once all the threads have called this method, the buffer is marked as free again.
     */
    void mark_as_unused(unsigned int index) override
    {
        Buffer &buf = get_buffer_from_index(index);
        ARM_COMPUTE_ERROR_ON(buf.index != index); // Should have blocked in lock_to_reshape_if_needed()
        // users is atomic, so the decrement needs no lock; only the last user frees the buffer:
        if(--buf.users == 0)
        {
            std::unique_lock<std::mutex> lock(buf.mutex);
            buf.state = State::FREE;
            lock.unlock();
            buf.sem.notify_all();
        }
    }

private:
    enum class State
    {
        FREE,
        BEING_RESHAPED,
        IN_USE
    };
    struct Buffer
    {
        unsigned int            index{};
        std::atomic_uint        users{};
        State                   state{ State::FREE };
        std::mutex              mutex{};
        std::condition_variable sem{};
    } _buffers[NUM_BUFFERS];
    Buffer &get_buffer_from_index(unsigned int index)
    {
        // Consecutive block indices rotate through the NUM_BUFFERS slots:
        return _buffers[index % NUM_BUFFERS];
    }
    unsigned int _max_num_users;
};
#endif /* NO_MULTI_THREADING */
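
// Illustrative call sequence for one B block, as driven by prepare() below
// (the names are the members used there):
//
//   if(_buffer_manager->lock_to_reshape_if_needed(idx)) // first thread to ask wins
//   {
//       _prepare_b->transform(_b_workloads[idx], info);
//       _buffer_manager->mark_as_reshaped(idx);
//   }
//   _buffer_manager->wait_for_reshaping(idx); // everyone else blocks here
//   /* ... run the matrix multiplications that consume buffer idx ... */
//   _buffer_manager->mark_as_unused(idx);     // the last user frees the slot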
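
/** Buffer manager used when the workloads are executed by a single thread.
 *
 * With one thread each B block is reshaped right before it is consumed, so a
 * single buffer is enough and no synchronisation is needed:
 * lock_to_reshape_if_needed() always returns true and the other methods are no-ops.
 */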
class BufferManagerSingleThread : public IBufferManager
{
public:
    unsigned int num_buffers() const override
    {
        return 1;
    }
    bool lock_to_reshape_if_needed(unsigned int index) override
    {
        return true;
    }
    void mark_as_reshaped(unsigned int index) override
    {
    }
    void wait_for_reshaping(unsigned int index) override
    {
    }
    void mark_as_unused(unsigned int index) override
    {
    }
};

NEGEMMInterleavedWrapper::NEGEMMInterleavedWrapper(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager))
{
}

void NEGEMMInterleavedWrapper::run()
{
    prepare();

    _memory_group.acquire();
    NEScheduler::get().run_tagged_workloads(_workloads, _tag.c_str());
    _memory_group.release();
}

void NEGEMMInterleavedWrapper::prepare()
{
    if(!_is_prepared)
    {
        if(_pretranspose_b)
        {
            _transformed_b.allocator()->allocate();
            NEScheduler::get().schedule(_prepare_b.get(), Window::DimX);
            _b->mark_as_unused();
        }
        else
        {
            _prepare_b->create_workloads(_b_workloads);
        }
        _transform_a->create_workloads(_a_workloads);
        _matrix_multiply->create_workloads(_mm_workloads);

        // Maximum number of workloads to create:
        const unsigned int num_threads    = NEScheduler::get().num_threads();
        const unsigned int max_iterations = std::max(num_threads, _num_windows);
        // Maximum number of iterations the parameters allow:
        const unsigned int num_iterations = _batch_window.num_iterations_total();
        // Keep the smaller of the two:
        const unsigned int num_windows  = std::min(num_iterations, max_iterations);
        const TensorShape  window_shape = _batch_window.shape();
        const unsigned int num_x_blocks = _block_walker.num_iterations(Window::DimX);
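        // The lambdas below consume num_x_blocks matrix-multiply workloads for every
        // A-transform workload (one per k-block), walking _mm_workloads in lockstep
        // with _a_workloads; the asserts in the loops guard this pairing.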

        // Create a 1D window to dynamically split the batch window:
        Window win_1D;
        win_1D.set(0, Window::Dimension(0, num_iterations));

        // Create one workload for each sub-window:
        for(unsigned int w = 0; w < num_windows; w++)
        {
            Window            win          = win_1D.split_window(0, w, num_windows);
            const Coordinates start_offset = index2coords(window_shape, win.x().start());
            const Coordinates end_offset   = index2coords(window_shape, win.x().end() - 1);

            if(_pretranspose_b)
            {
                auto workload = [start_offset, end_offset, num_x_blocks, this](const ThreadInfo & info)
                {
                    // For each block of rows in "M":
                    auto workload_mm = this->_mm_workloads.begin();
                    for(auto workload_a = this->_a_workloads.begin(); workload_a != this->_a_workloads.end(); workload_a++)
                    {
                        // Transform one k_block from A:
                        this->_transform_a->transform(*workload_a, info, this->_batch_window, start_offset, end_offset);
                        // Then perform the matrix multiplication for each x block along N:
                        for(unsigned int i = 0; i < num_x_blocks; i++)
                        {
                            ARM_COMPUTE_ERROR_ON(workload_mm == this->_mm_workloads.end());
                            this->_matrix_multiply->transform(*workload_mm++, info, this->_batch_window, start_offset, end_offset);
                        }
                    }
                };
                _workloads.push_back(workload);
            }
            else
            {
                auto workload = [num_threads, start_offset, end_offset, num_x_blocks, this](const ThreadInfo & info)
                {
                    // For each block of rows in "M":
                    auto         workload_mm = this->_mm_workloads.begin();
                    unsigned int workload_b  = 0;
                    // If there is only one thread then only reshape the B blocks as they are needed:
                    unsigned int workload_b_next = num_threads == 1 ? this->_b_workloads.size() : 1;

                    for(auto workload_a = this->_a_workloads.begin(); workload_a != this->_a_workloads.end(); workload_a++)
                    {
                        // Transform one k_block from A:
                        this->_transform_a->transform(*workload_a, info, this->_batch_window, start_offset, end_offset);
                        // Then perform the matrix multiplication for each x block along N:
                        for(unsigned int i = 0; i < num_x_blocks; i++)
                        {
                            ARM_COMPUTE_ERROR_ON(workload_mm == this->_mm_workloads.end());
                            if(workload_b_next < this->_b_workloads.size())
                            {
                                // Lock on the BufferManager: do we need to reshape the next B block ourselves?
                                if(this->_buffer_manager->lock_to_reshape_if_needed(workload_b_next))
                                {
                                    this->_prepare_b->transform(this->_b_workloads[workload_b_next], info);
                                    this->_buffer_manager->mark_as_reshaped(workload_b_next);
                                }
                                workload_b_next++;
                            }
                            ARM_COMPUTE_ERROR_ON(workload_b >= this->_b_workloads.size());
                            // Reshape the current B block if needed, otherwise wait for another thread to finish it:
                            if(this->_buffer_manager->lock_to_reshape_if_needed(workload_b))
                            {
                                this->_prepare_b->transform(this->_b_workloads[workload_b], info);
                                this->_buffer_manager->mark_as_reshaped(workload_b);
                            }
                            this->_buffer_manager->wait_for_reshaping(workload_b);
                            this->_matrix_multiply->transform(*workload_mm++, info, this->_batch_window, start_offset, end_offset);
                            this->_buffer_manager->mark_as_unused(workload_b);
                            workload_b++;
                        }
                    }
                };
                _workloads.push_back(workload);
            }
        }
        if(!_pretranspose_b && num_windows > 1 && num_windows % num_threads != 0)
        {
            // Make sure the number of workloads is a multiple of the number of threads to avoid deadlocks:
            // a buffer only becomes free again once all _max_num_users threads have marked it as unused,
            // so every thread must run a workload that takes part in the reshape/use/release protocol.
            for(unsigned int leftover = num_windows % num_threads; leftover != num_threads; leftover++)
            {
                auto workload = [this](const ThreadInfo & info)
                {
                    // Padding workload: takes part in reshaping and releasing the B blocks but performs no matrix multiplication.
                    unsigned int workload_b      = 0;
                    unsigned int workload_b_next = 1;

                    for(unsigned int iteration = 0; iteration < this->_mm_workloads.size(); iteration++)
                    {
                        if(workload_b_next < this->_b_workloads.size())
                        {
                            // Lock on the BufferManager: do we need to reshape the next B block ourselves?
                            if(this->_buffer_manager->lock_to_reshape_if_needed(workload_b_next))
                            {
                                this->_prepare_b->transform(this->_b_workloads[workload_b_next], info);
                                this->_buffer_manager->mark_as_reshaped(workload_b_next);
                            }
                            workload_b_next++;
                        }
                        ARM_COMPUTE_ERROR_ON(workload_b >= this->_b_workloads.size());
                        // Reshape the current B block if needed, otherwise wait for another thread to finish it:
                        if(this->_buffer_manager->lock_to_reshape_if_needed(workload_b))
                        {
                            this->_prepare_b->transform(this->_b_workloads[workload_b], info);
                            this->_buffer_manager->mark_as_reshaped(workload_b);
                        }
                        this->_buffer_manager->wait_for_reshaping(workload_b);
                        this->_buffer_manager->mark_as_unused(workload_b);
                        workload_b++;
                    }
                };
                _workloads.push_back(workload);
            }
        }

        _is_prepared = true;
    }
}

void NEGEMMInterleavedWrapper::configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta, bool pretranspose_b)
{
    _params         = INEGEMMWrapperKernel::extract_parameters(a, b, c);
    _a              = a;
    _b              = b;
    _c              = c;
    _pretranspose_b = pretranspose_b;

    const DataType     input_type  = a->info()->data_type();
    const CPUInfo     &ci          = NEScheduler::get().cpu_info();
    const unsigned int num_threads = NEScheduler::get().num_threads();

    const arm_gemm::KernelDescription gemm_kernel_info = get_gemm_info(input_type, ci, num_threads, _params, alpha, beta, pretranspose_b);
    ARM_COMPUTE_ERROR_ON(gemm_kernel_info.method != arm_gemm::GemmMethod::GEMM_INTERLEAVED);

    // Forcing 128-byte alignment (required by 32-bit kernels)
    const unsigned int alignment = 128;
    _transformed_b.allocator()->init(TensorInfo{}, alignment);
    _tmp_c.allocator()->init(TensorInfo{}, alignment);
    _tag = "NEGEMMInterleaved_" + gemm_kernel_info.name;

    // Get the strategy and check it before dereferencing it:
    std::unique_ptr<detail::IInterleavedStrategy> strategy = detail::create_strategy(gemm_kernel_info.name);
    ARM_COMPUTE_ERROR_ON(strategy == nullptr);
    _num_windows = iceildiv(_params.M, strategy->out_height()) * _params.batches;

    if(!_pretranspose_b)
    {
        _block_sizes = strategy->calculate_block_sizes_for_strategy(ci, _params);
        _batch_window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_block_sizes.m_round, _block_sizes.strategy_out_height), _block_sizes.strategy_out_height));
        _batch_window.set(Window::DimY, Window::Dimension(0, _params.batches));
        // If the execution is single-threaded or there is only one window then the buffer manager
        // only needs one buffer; otherwise use NUM_BUFFERS buffers and ping-pong between them:
        const unsigned int num_iterations = _batch_window.num_iterations_total();
        if(NEScheduler::get().num_threads() == 1 || num_iterations == 1)
        {
            _buffer_manager = support::cpp14::make_unique<BufferManagerSingleThread>();
        }
        else
        {
#ifdef NO_MULTI_THREADING
            ARM_COMPUTE_ERROR("Can't have more than 1 buffer without multiple threads");
#else /* NO_MULTI_THREADING */
            _buffer_manager = support::cpp14::make_unique<BufferManagerMultipleThreads>(NEScheduler::get().num_threads());
#endif /* NO_MULTI_THREADING */
        }
        // If B is transposed at every iteration then transformed_B can be managed:
        _memory_group.manage(&_transformed_b);
        auto_init_if_empty(*_transformed_b.info(), _b->info()->clone()->set_tensor_shape(TensorShape(_block_sizes.x_block * _block_sizes.k_block, _buffer_manager->num_buffers())));
    }
    else
    {
        _tag += "_preB";
    }

    _prepare_b = strategy->instantiate_prepareB(b, &_transformed_b, _params, ci);
    ARM_COMPUTE_ERROR_ON(_prepare_b == nullptr);

    if(_pretranspose_b)
    {
        _block_sizes = _prepare_b->block_sizes();
        _batch_window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_block_sizes.m_round, _block_sizes.strategy_out_height), _block_sizes.strategy_out_height));
        _batch_window.set(Window::DimY, Window::Dimension(0, _params.batches));
    }

    _block_walker.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_params.N, _block_sizes.x_block), _block_sizes.x_block));
    _block_walker.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_params.K, _block_sizes.k_block), _block_sizes.k_block));
    _block_walker.set(Window::DimZ, Window::Dimension(0, _params.multis));
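    // The block walker steps over N in x_block chunks (DimX), over K in k_block
    // chunks (DimY) and over the multis (DimZ); N and K are rounded up to whole blocks.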

    _transformed_a.allocator()->init(TensorInfo(TensorShape{ _block_sizes.k_block, _block_sizes.m_round, _params.batches }, 1, input_type), alignment);
    _memory_group.manage(&_transformed_a);
    _memory_group.manage(&_tmp_c);

    _transform_a     = strategy->instantiate_transformA(_a, &_transformed_a, _block_walker, _params);
    _matrix_multiply = strategy->instantiate_matrix_multiply(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, alpha, beta, pretranspose_b, num_threads);
    ARM_COMPUTE_ERROR_ON(_transform_a == nullptr);
    ARM_COMPUTE_ERROR_ON(_matrix_multiply == nullptr);

    _transformed_a.allocator()->allocate();
    _tmp_c.allocator()->allocate();
    if(!_pretranspose_b)
    {
        _transformed_b.allocator()->allocate();
    }
}
} // namespace arm_compute