/*
 * Copyright (c) 2016-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CPP/CPPScheduler.h"

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/runtime/CPUUtils.h"
#include "support/Mutex.h"

#include <algorithm>
#include <atomic>
#include <cmath>
#include <condition_variable>
#include <iostream>
#include <list>
#include <mutex>
#include <system_error>
#include <thread>
40
Moritz Pflanzerff06f202017-09-08 13:48:23 +010041namespace arm_compute
42{
Anthony Barbier52ecb062018-05-25 13:32:10 +010043namespace
44{
/** Hands out workload indices in [start, end) to competing worker threads. */
class ThreadFeeder
{
public:
    /** Constructor
     *
     * @param[in] start First value that will be returned by the feeder
     * @param[in] end   End condition (the last value returned by get_next() will be end - 1)
     */
    explicit ThreadFeeder(unsigned int start = 0, unsigned int end = 0)
        : _atomic_counter(start), _end(end)
    {
    }
    /** Return the next element in the range if there is one.
     *
     * @param[out] next Will contain the next element if there is one.
     *
     * @return False if the end of the range has been reached and next wasn't set.
     */
    bool get_next(unsigned int &next)
    {
        // Relaxed ordering is enough here: the counter itself is the only state
        // being shared through this operation.
        next = _atomic_counter.fetch_add(1u, std::memory_order_relaxed);
        return next < _end;
    }

private:
    std::atomic_uint   _atomic_counter; // next index to hand out
    const unsigned int _end;            // one-past-the-last valid index
};
73
/** Given two dimensions and a maximum number of threads to utilise, calculate the best
 * combination of threads that fit in (multiplied together) max_threads.
 *
 * This algorithm assumes that work in either of the dimensions is equally difficult
 * to compute
 *
 * @param[in] max_threads Maximum number of threads the product of the result may not exceed
 * @param[in] m           Problem size in the M dimension
 * @param[in] n           Problem size in the N dimension
 *
 * @returns [m_nthreads, n_nthreads] A pair of the threads that should be used in each dimension
 */
std::pair<unsigned, unsigned> split_2d(unsigned max_threads, std::size_t m, std::size_t n)
{
    //guard against an empty N dimension: the ratio below would divide by zero and
    //the resulting infinity cannot be converted to unsigned. Hand all available
    //threads to the M dimension instead.
    if(n == 0)
    {
        return { std::min<unsigned>(m > 0 ? m : 1, max_threads > 0 ? max_threads : 1), 1 };
    }

    /*
     * We want the same ratio of threads in M & N to the ratio of m and n problem size
     *
     * Therefore: mt/nt == m/n where mt*nt == max_threads
     *
     * max_threads/nt = mt & (max_threads/nt) * (m/n) = nt
     * nt^2 = max_threads * (m/n)
     * nt = sqrt( max_threads * (m/n) )
     */
    //ratio of m to n in problem dimensions
    double ratio = m / static_cast<double>(n);

    // nt = sqrt(max_threads * (m / n) )
    const unsigned adjusted = std::round(
                                  std::sqrt(max_threads * ratio));

    //find the nearest factor of max_threads
    for(unsigned i = 0; i != adjusted; ++i)
    {
        //try down
        const unsigned adj_down = adjusted - i;
        if(max_threads % adj_down == 0)
        {
            return { adj_down, max_threads / adj_down };
        }

        //try up
        const unsigned adj_up = adjusted + i;
        if(max_threads % adj_up == 0)
        {
            return { adj_up, max_threads / adj_up };
        }
    }

    //we didn't find anything so lets bail out with maxes biased to the largest dimension
    if(m > n)
    {
        return { std::min<unsigned>(m, max_threads), 1 };
    }
    else
    {
        return { 1, std::min<unsigned>(n, max_threads) };
    }
}
128
Anthony Barbier52ecb062018-05-25 13:32:10 +0100129/** Execute workloads[info.thread_id] first, then call the feeder to get the index of the next workload to run.
130 *
131 * Will run workloads until the feeder reaches the end of its range.
132 *
133 * @param[in] workloads The array of workloads
134 * @param[in,out] feeder The feeder indicating which workload to execute next.
135 * @param[in] info Threading and CPU info.
136 */
137void process_workloads(std::vector<IScheduler::Workload> &workloads, ThreadFeeder &feeder, const ThreadInfo &info)
138{
139 unsigned int workload_index = info.thread_id;
140 do
141 {
142 ARM_COMPUTE_ERROR_ON(workload_index >= workloads.size());
143 workloads[workload_index](info);
144 }
145 while(feeder.get_next(workload_index));
146}
Anthony Barbier52ecb062018-05-25 13:32:10 +0100147} //namespace
148
Pablo Tello27251972019-09-19 16:39:04 +0100149struct CPPScheduler::Impl final
Georgios Pinitas12833d02019-07-25 13:31:10 +0100150{
Pablo Tello27251972019-09-19 16:39:04 +0100151 explicit Impl(unsigned int thread_hint)
Georgios Pinitas12833d02019-07-25 13:31:10 +0100152 : _num_threads(thread_hint), _threads(_num_threads - 1)
153 {
154 }
155 void set_num_threads(unsigned int num_threads, unsigned int thead_hint)
156 {
157 _num_threads = num_threads == 0 ? thead_hint : num_threads;
158 _threads.resize(_num_threads - 1);
159 }
160 unsigned int num_threads() const
161 {
162 return _num_threads;
163 }
164
165 void run_workloads(std::vector<IScheduler::Workload> &workloads);
166
167 class Thread;
168
Pablo Tello27251972019-09-19 16:39:04 +0100169 unsigned int _num_threads;
170 std::list<Thread> _threads;
171 arm_compute::Mutex _run_workloads_mutex{};
Georgios Pinitas12833d02019-07-25 13:31:10 +0100172};
173
/** Worker thread: sleeps on a condition variable until it is handed workloads to execute. */
class CPPScheduler::Impl::Thread final
{
public:
    /** Start a new thread. */
    Thread();

    Thread(const Thread &) = delete;
    Thread &operator=(const Thread &) = delete;
    Thread(Thread &&) = delete;
    Thread &operator=(Thread &&) = delete;

    /** Destructor. Make the thread join. */
    ~Thread();

    /** Request the worker thread to start executing workloads.
     *
     * The thread will start by executing workloads[info.thread_id] and will then call the feeder to
     * get the index of the following workload to run.
     *
     * @note This function will return as soon as the workloads have been sent to the worker thread.
     * wait() needs to be called to ensure the execution is complete.
     */
    void start(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info);

    /** Wait for the current kernel execution to complete. */
    void wait();

    /** Function ran by the worker thread. */
    void worker_thread();

private:
    std::thread                        _thread{};
    ThreadInfo                         _info{};
    std::vector<IScheduler::Workload> *_workloads{ nullptr };   // non-owning; nullptr is the "exit" sentinel (see ~Thread)
    ThreadFeeder                      *_feeder{ nullptr };      // non-owning; shared with the sibling threads
    std::mutex                         _m{};                    // protects _wait_for_work / _job_complete
    std::condition_variable            _cv{};
    bool                               _wait_for_work{ false }; // set by start(), consumed by worker_thread()
    bool                               _job_complete{ true };   // set by worker_thread(), awaited by wait()
    std::exception_ptr                 _current_exception{ nullptr }; // captured in the worker, rethrown in wait()
};
215
CPPScheduler::Impl::Thread::Thread()
{
    // Launch the worker immediately; it blocks inside worker_thread() until start() is called
    _thread = std::thread(&Thread::worker_thread, this);
}
220
CPPScheduler::Impl::Thread::~Thread()
{
    // Make sure worker thread has ended
    if(_thread.joinable())
    {
        // A null workloads pointer is the sentinel that tells the worker loop to return
        ThreadFeeder feeder;
        start(nullptr, feeder, ThreadInfo());
        _thread.join();
    }
}
231
void CPPScheduler::Impl::Thread::start(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info)
{
    // These members are only read by the worker after it observes _wait_for_work under
    // the mutex, so the lock acquire/release below publishes them to the worker thread.
    _workloads = workloads;
    _feeder    = &feeder;
    _info      = info;
    {
        std::lock_guard<std::mutex> lock(_m);
        _wait_for_work = true;
        _job_complete  = false;
    }
    // Notify after releasing the lock so the woken worker doesn't immediately block on it
    _cv.notify_one();
}
244
void CPPScheduler::Impl::Thread::wait()
{
    {
        // Block until the worker flags the current batch as done
        std::unique_lock<std::mutex> lock(_m);
        _cv.wait(lock, [&] { return _job_complete; });
    }

    // Propagate any exception the worker captured onto the caller's thread
    if(_current_exception)
    {
        std::rethrow_exception(_current_exception);
    }
}
257
void CPPScheduler::Impl::Thread::worker_thread()
{
    // Event loop: sleep until start() provides work, run it, signal completion, repeat.
    while(true)
    {
        std::unique_lock<std::mutex> lock(_m);
        _cv.wait(lock, [&] { return _wait_for_work; });
        _wait_for_work = false;

        _current_exception = nullptr;

        // Time to exit
        if(_workloads == nullptr)
        {
            return;
        }

#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
        try
        {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
            process_workloads(*_workloads, *_feeder, _info);

#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
        }
        catch(...)
        {
            // Capture the exception so wait() can rethrow it on the scheduling thread
            _current_exception = std::current_exception();
        }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        // Flag completion while still holding the lock, then wake the waiter
        _job_complete = true;
        lock.unlock();
        _cv.notify_one();
    }
}
292
/*
 * This singleton has been deprecated and will be removed in the next release
 */
CPPScheduler &CPPScheduler::get()
{
    // Meyers singleton: initialization is thread-safe since C++11
    static CPPScheduler scheduler;
    return scheduler;
}
301
CPPScheduler::CPPScheduler()
    // Size the thread pool from the platform's suggested thread count
    : _impl(support::cpp14::make_unique<Impl>(num_threads_hint()))
{
}
306
// Defined out-of-line so the unique_ptr<Impl> member can be destroyed where Impl is complete
CPPScheduler::~CPPScheduler() = default;
308
void CPPScheduler::set_num_threads(unsigned int num_threads)
{
    // No changes in the number of threads while current workloads are running
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    _impl->set_num_threads(num_threads, num_threads_hint());
}
315
// Returns the configured thread count (the calling thread counts as one of them)
unsigned int CPPScheduler::num_threads() const
{
    return _impl->num_threads();
}
320
#ifndef DOXYGEN_SKIP_THIS
void CPPScheduler::run_workloads(std::vector<IScheduler::Workload> &workloads)
{
    // Mutex to ensure other threads won't interfere with the setup of the current thread's workloads
    // Other thread's workloads will be scheduled after the current thread's workloads have finished
    // This is not great because different threads workloads won't run in parallel but at least they
    // won't interfere each other and deadlock.
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    // Never use more threads than there are workloads to run
    const unsigned int num_threads = std::min(_impl->num_threads(), static_cast<unsigned int>(workloads.size()));
    if(num_threads < 1)
    {
        return;
    }
    // Each thread starts at its own index; the feeder hands out the remaining
    // indices [num_threads, workloads.size()) dynamically
    ThreadFeeder feeder(num_threads, workloads.size());
    ThreadInfo   info;
    info.cpu_info    = &_cpu_info;
    info.num_threads = num_threads;
    unsigned int t         = 0;
    auto         thread_it = _impl->_threads.begin();
    // Dispatch num_threads - 1 batches to the worker threads...
    for(; t < num_threads - 1; ++t, ++thread_it)
    {
        info.thread_id = t;
        thread_it->start(&workloads, feeder, info);
    }

    // ...and process the last one on the calling thread itself
    info.thread_id = t;
    process_workloads(workloads, feeder, info);
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    try
    {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        // Join all workers; wait() rethrows any exception captured on a worker thread
        for(auto &thread : _impl->_threads)
        {
            thread.wait();
        }
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    }
    catch(const std::system_error &e)
    {
        std::cerr << "Caught system_error with code " << e.code() << " meaning " << e.what() << '\n';
    }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
}
#endif /* DOXYGEN_SKIP_THIS */
Anthony Barbier52ecb062018-05-25 13:32:10 +0100365
/** Common scheduling path shared by schedule() and schedule_op().
 *
 * Splits the kernel's maximum window either over both X and Y (when the hint is
 * split_dimensions_all) or along the single hinted dimension, and runs the
 * resulting workloads through run_workloads().
 *
 * @param[in]     kernel  Kernel to execute (must not be nullptr).
 * @param[in]     hints   Split dimension and strategy hints.
 * @param[in,out] inputs  Input tensors; when empty the kernel's run() path is used instead of run_op().
 * @param[in,out] outputs Output tensors, forwarded to run_op().
 */
void CPPScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, std::vector<InputOperatorTensors *> &inputs, std::vector<OutputOperatorTensors *> &outputs)
{
    ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");

    const Window &max_window = kernel->window();

    if(hints.split_dimension() == IScheduler::split_dimensions_all)
    {
        /*
         * if the split dim is size_t max then this signals we should parallelise over
         * all dimensions
         */
        const std::size_t m = max_window.num_iterations(Window::DimX);
        const std::size_t n = max_window.num_iterations(Window::DimY);

        //in c++17 this can be swapped for auto [ m_threads, n_threads ] = split_2d(...
        unsigned m_threads, n_threads;
        std::tie(m_threads, n_threads) = split_2d(_impl->_num_threads, m, n);

        // One workload per (mi, ni) tile of the m_threads x n_threads grid
        std::vector<IScheduler::Workload> workloads;
        for(unsigned int ni = 0; ni != n_threads; ++ni)
        {
            for(unsigned int mi = 0; mi != m_threads; ++mi)
            {
                workloads.push_back(
                    [ni, mi, m_threads, n_threads, &max_window, &kernel](const ThreadInfo & info)
                {
                    //narrow the window to our mi-ni workload
                    Window win = max_window.split_window(Window::DimX, mi, m_threads)
                                 .split_window(Window::DimY, ni, n_threads);

                    win.validate();

                    // thread_locator tells the kernel where this tile sits in the 2D grid
                    Window thread_locator;
                    thread_locator.set(Window::DimX, Window::Dimension(mi, m_threads));
                    thread_locator.set(Window::DimY, Window::Dimension(ni, n_threads));

                    thread_locator.validate();

                    kernel->run_nd(win, info, thread_locator);
                });
            }
        }
        run_workloads(workloads);
    }
    else
    {
        const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
        const unsigned int num_threads    = std::min(num_iterations, _impl->_num_threads);

        // Nothing to do for an empty window
        if(num_iterations == 0)
        {
            return;
        }

        if(!kernel->is_parallelisable() || num_threads == 1)
        {
            // Single-threaded path: run the whole window on the calling thread
            ThreadInfo info;
            info.cpu_info = &_cpu_info;
            if(inputs.empty())
            {
                kernel->run(max_window, info);
            }
            else
            {
                kernel->run_op(inputs, outputs, max_window, info);
            }
        }
        else
        {
            unsigned int num_windows = 0;
            switch(hints.strategy())
            {
                case StrategyHint::STATIC:
                    // One window per thread
                    num_windows = num_threads;
                    break;
                case StrategyHint::DYNAMIC:
                {
                    const unsigned int granule_threshold = (hints.threshold() <= 0) ? num_threads : static_cast<unsigned int>(hints.threshold());
                    // Make sure we don't use some windows which are too small as this might create some contention on the ThreadFeeder
                    num_windows = num_iterations > granule_threshold ? granule_threshold : num_iterations;
                    break;
                }
                default:
                    ARM_COMPUTE_ERROR("Unknown strategy");
            }
            std::vector<IScheduler::Workload> workloads(num_windows);
            for(unsigned int t = 0; t < num_windows; t++)
            {
                //Capture 't' by copy, all the other variables by reference:
                workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &inputs, &outputs](const ThreadInfo & info)
                {
                    Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
                    win.validate();

                    if(inputs.empty())
                    {
                        kernel->run(win, info);
                    }
                    else
                    {
                        kernel->run_op(inputs, outputs, win, info);
                    }
                };
            }
            run_workloads(workloads);
        }
    }
}
Michalis Spyroubcd23522020-05-21 15:02:36 +0100475
void CPPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, std::vector<InputOperatorTensors *> &inputs, std::vector<OutputOperatorTensors *> &outputs)
{
    // Operator-style entry point: tensors are passed in explicitly instead of being owned by the kernel
    schedule_common(kernel, hints, inputs, outputs);
}
480
481void CPPScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
482{
483 std::vector<InputOperatorTensors *> inputs;
484 std::vector<OutputOperatorTensors *> outputs;
485 schedule_common(kernel, hints, inputs, outputs);
486}
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100487} // namespace arm_compute