blob: 0a03497cb94c829dec5412f18d2314c06b23e40f [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Joseph Dobson6f8b17d2020-02-11 19:32:11 +00002 * Copyright (c) 2016-2020 ARM Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CPP/CPPScheduler.h"
25
26#include "arm_compute/core/CPP/ICPPKernel.h"
27#include "arm_compute/core/Error.h"
28#include "arm_compute/core/Helpers.h"
29#include "arm_compute/core/Utils.h"
Pablo Tello7fad9b12018-03-14 17:55:27 +000030#include "arm_compute/runtime/CPUUtils.h"
Pablo Tello27251972019-09-19 16:39:04 +010031#include "support/Mutex.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010032
Anthony Barbierd89940e2018-06-28 13:39:35 +010033#include <atomic>
Moritz Pflanzerff06f202017-09-08 13:48:23 +010034#include <condition_variable>
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035#include <iostream>
Georgios Pinitas12833d02019-07-25 13:31:10 +010036#include <list>
Moritz Pflanzerff06f202017-09-08 13:48:23 +010037#include <mutex>
Anthony Barbier6ff3b192017-09-04 18:44:23 +010038#include <system_error>
39#include <thread>
40
Moritz Pflanzerff06f202017-09-08 13:48:23 +010041namespace arm_compute
42{
Anthony Barbier52ecb062018-05-25 13:32:10 +010043namespace
44{
/** Lock-free dispenser of workload indices shared by all worker threads. */
class ThreadFeeder
{
public:
    /** Constructor
     *
     * @param[in] start First value that will be returned by the feeder
     * @param[in] end   End condition (The last value returned by get_next() will be end - 1)
     */
    explicit ThreadFeeder(unsigned int start = 0, unsigned int end = 0)
        : _atomic_counter(start), _end(end)
    {
    }

    /** Return the next element in the range if there is one.
     *
     * @param[out] next Will contain the next element if there is one.
     *
     * @return False if the end of the range has been reached and next wasn't set.
     */
    bool get_next(unsigned int &next)
    {
        // Relaxed ordering is sufficient: the counter is the only shared state.
        const unsigned int claimed = _atomic_counter.fetch_add(1u, std::memory_order_relaxed);
        next                       = claimed;
        return claimed < _end;
    }

private:
    std::atomic_uint   _atomic_counter; // Next index to hand out
    const unsigned int _end;            // One past the last valid index
};
73
Joseph Dobson6f8b17d2020-02-11 19:32:11 +000074/** Given two dimensions and a maxium number of threads to utilise, calcualte the best
75 * combination of threads that fit in (mutliplied together) max_threads.
76 *
77 * This algorithm assumes that work in either of the dimensions is equally difficult
78 * to compute
79 *
80 * @returns [m_nthreads, n_nthreads] A pair of the threads that should be used in each dimension
81 */
82std::pair<unsigned, unsigned> split_2d(unsigned max_threads, std::size_t m, std::size_t n)
83{
84 /*
85 * We want the same ratio of threads in M & N to the ratio of m and n problem size
86 *
87 * Therefore: mt/nt == m/n where mt*nt == max_threads
88 *
89 * max_threads/nt = mt & (max_threads/nt) * (m/n) = nt
90 * nt^2 = max_threads * (m/n)
91 * nt = sqrt( max_threads * (m/n) )
92 */
93 //ratio of m to n in problem dimensions
94 double ratio = m / static_cast<double>(n);
95
96 // nt = sqrt(max_threads * (m / n) )
97 const unsigned adjusted = std::round(
98 std::sqrt(max_threads * ratio));
99
100 //find the nearest factor of max_threads
101 for(unsigned i = 0; i!= adjusted; ++i)
102 {
103 //try down
104 const unsigned adj_down = adjusted - i;
105 if(max_threads % adj_down == 0)
106 {
107 return { adj_down, max_threads / adj_down };
108 }
109
110 //try up
111 const unsigned adj_up = adjusted + i;
112 if(max_threads % adj_up == 0)
113 {
114 return { adj_up, max_threads / adj_up };
115 }
116 }
117
118 //we didn't find anything so lets bail out with maxes biased to the largest dimension
119 if(m > n)
120 {
121 return{ std::min<unsigned>(m, max_threads), 1 };
122 }
123 else
124 {
125 return{ 1, std::min<unsigned>(n, max_threads) };
126 }
127}
128
Anthony Barbier52ecb062018-05-25 13:32:10 +0100129/** Execute workloads[info.thread_id] first, then call the feeder to get the index of the next workload to run.
130 *
131 * Will run workloads until the feeder reaches the end of its range.
132 *
133 * @param[in] workloads The array of workloads
134 * @param[in,out] feeder The feeder indicating which workload to execute next.
135 * @param[in] info Threading and CPU info.
136 */
137void process_workloads(std::vector<IScheduler::Workload> &workloads, ThreadFeeder &feeder, const ThreadInfo &info)
138{
139 unsigned int workload_index = info.thread_id;
140 do
141 {
142 ARM_COMPUTE_ERROR_ON(workload_index >= workloads.size());
143 workloads[workload_index](info);
144 }
145 while(feeder.get_next(workload_index));
146}
147
148} //namespace
149
Pablo Tello27251972019-09-19 16:39:04 +0100150struct CPPScheduler::Impl final
Georgios Pinitas12833d02019-07-25 13:31:10 +0100151{
Pablo Tello27251972019-09-19 16:39:04 +0100152 explicit Impl(unsigned int thread_hint)
Georgios Pinitas12833d02019-07-25 13:31:10 +0100153 : _num_threads(thread_hint), _threads(_num_threads - 1)
154 {
155 }
156 void set_num_threads(unsigned int num_threads, unsigned int thead_hint)
157 {
158 _num_threads = num_threads == 0 ? thead_hint : num_threads;
159 _threads.resize(_num_threads - 1);
160 }
161 unsigned int num_threads() const
162 {
163 return _num_threads;
164 }
165
166 void run_workloads(std::vector<IScheduler::Workload> &workloads);
167
168 class Thread;
169
Pablo Tello27251972019-09-19 16:39:04 +0100170 unsigned int _num_threads;
171 std::list<Thread> _threads;
172 arm_compute::Mutex _run_workloads_mutex{};
Georgios Pinitas12833d02019-07-25 13:31:10 +0100173};
174
Pablo Tello27251972019-09-19 16:39:04 +0100175class CPPScheduler::Impl::Thread final
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100176{
177public:
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100178 /** Start a new thread. */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100179 Thread();
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100180
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100181 Thread(const Thread &) = delete;
182 Thread &operator=(const Thread &) = delete;
183 Thread(Thread &&) = delete;
184 Thread &operator=(Thread &&) = delete;
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100185
186 /** Destructor. Make the thread join. */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100187 ~Thread();
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100188
Anthony Barbier52ecb062018-05-25 13:32:10 +0100189 /** Request the worker thread to start executing workloads.
190 *
191 * The thread will start by executing workloads[info.thread_id] and will then call the feeder to
192 * get the index of the following workload to run.
193 *
194 * @note This function will return as soon as the workloads have been sent to the worker thread.
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100195 * wait() needs to be called to ensure the execution is complete.
196 */
Anthony Barbier52ecb062018-05-25 13:32:10 +0100197 void start(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info);
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100198
199 /** Wait for the current kernel execution to complete. */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100200 void wait();
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100201
202 /** Function ran by the worker thread. */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100203 void worker_thread();
204
205private:
Anthony Barbier52ecb062018-05-25 13:32:10 +0100206 std::thread _thread{};
207 ThreadInfo _info{};
208 std::vector<IScheduler::Workload> *_workloads{ nullptr };
209 ThreadFeeder *_feeder{ nullptr };
210 std::mutex _m{};
211 std::condition_variable _cv{};
212 bool _wait_for_work{ false };
213 bool _job_complete{ true };
214 std::exception_ptr _current_exception{ nullptr };
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100215};
216
Georgios Pinitas12833d02019-07-25 13:31:10 +0100217CPPScheduler::Impl::Thread::Thread()
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100218{
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100219 _thread = std::thread(&Thread::worker_thread, this);
220}
221
Georgios Pinitas12833d02019-07-25 13:31:10 +0100222CPPScheduler::Impl::Thread::~Thread()
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100223{
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100224 // Make sure worker thread has ended
225 if(_thread.joinable())
226 {
Anthony Barbier52ecb062018-05-25 13:32:10 +0100227 ThreadFeeder feeder;
228 start(nullptr, feeder, ThreadInfo());
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100229 _thread.join();
230 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100231}
232
Georgios Pinitas12833d02019-07-25 13:31:10 +0100233void CPPScheduler::Impl::Thread::start(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info)
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100234{
Anthony Barbier52ecb062018-05-25 13:32:10 +0100235 _workloads = workloads;
236 _feeder = &feeder;
237 _info = info;
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100238 {
239 std::lock_guard<std::mutex> lock(_m);
240 _wait_for_work = true;
241 _job_complete = false;
242 }
243 _cv.notify_one();
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100244}
245
Georgios Pinitas12833d02019-07-25 13:31:10 +0100246void CPPScheduler::Impl::Thread::wait()
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100247{
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100248 {
249 std::unique_lock<std::mutex> lock(_m);
250 _cv.wait(lock, [&] { return _job_complete; });
251 }
252
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100253 if(_current_exception)
254 {
255 std::rethrow_exception(_current_exception);
256 }
257}
258
Georgios Pinitas12833d02019-07-25 13:31:10 +0100259void CPPScheduler::Impl::Thread::worker_thread()
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100260{
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100261 while(true)
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100262 {
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100263 std::unique_lock<std::mutex> lock(_m);
264 _cv.wait(lock, [&] { return _wait_for_work; });
265 _wait_for_work = false;
266
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100267 _current_exception = nullptr;
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100268
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100269 // Time to exit
Anthony Barbier52ecb062018-05-25 13:32:10 +0100270 if(_workloads == nullptr)
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100271 {
272 return;
273 }
274
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000275#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100276 try
277 {
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000278#endif /* ARM_COMPUTE_EXCEPTIONS_ENABLED */
Anthony Barbier52ecb062018-05-25 13:32:10 +0100279 process_workloads(*_workloads, *_feeder, _info);
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000280
281#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100282 }
283 catch(...)
284 {
285 _current_exception = std::current_exception();
286 }
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000287#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100288 _job_complete = true;
289 lock.unlock();
290 _cv.notify_one();
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100291 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100292}
293
Georgios Pinitas12833d02019-07-25 13:31:10 +0100294/*
295 * This singleton has been deprecated and will be removed in the next release
296 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100297CPPScheduler &CPPScheduler::get()
298{
299 static CPPScheduler scheduler;
300 return scheduler;
301}
302
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100303CPPScheduler::CPPScheduler()
Georgios Pinitas12833d02019-07-25 13:31:10 +0100304 : _impl(support::cpp14::make_unique<Impl>(num_threads_hint()))
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100305{
306}
307
Georgios Pinitas12833d02019-07-25 13:31:10 +0100308CPPScheduler::~CPPScheduler() = default;
309
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100310void CPPScheduler::set_num_threads(unsigned int num_threads)
311{
Pablo Tello27251972019-09-19 16:39:04 +0100312 // No changes in the number of threads while current workloads are running
313 arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
Georgios Pinitas12833d02019-07-25 13:31:10 +0100314 _impl->set_num_threads(num_threads, num_threads_hint());
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100315}
316
Moritz Pflanzerd929b9c2017-06-28 10:15:48 +0100317unsigned int CPPScheduler::num_threads() const
318{
Georgios Pinitas12833d02019-07-25 13:31:10 +0100319 return _impl->num_threads();
Moritz Pflanzerd929b9c2017-06-28 10:15:48 +0100320}
321
Vidhya Sudhan Loganathand646ae12018-11-19 15:18:20 +0000322#ifndef DOXYGEN_SKIP_THIS
Anthony Barbier52ecb062018-05-25 13:32:10 +0100323void CPPScheduler::run_workloads(std::vector<IScheduler::Workload> &workloads)
324{
Pablo Tello27251972019-09-19 16:39:04 +0100325 // Mutex to ensure other threads won't interfere with the setup of the current thread's workloads
326 // Other thread's workloads will be scheduled after the current thread's workloads have finished
327 // This is not great because different threads workloads won't run in parallel but at least they
328 // won't interfere each other and deadlock.
329 arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
330 const unsigned int num_threads = std::min(_impl->num_threads(), static_cast<unsigned int>(workloads.size()));
Anthony Barbier52ecb062018-05-25 13:32:10 +0100331 if(num_threads < 1)
332 {
333 return;
334 }
335 ThreadFeeder feeder(num_threads, workloads.size());
336 ThreadInfo info;
337 info.cpu_info = &_cpu_info;
338 info.num_threads = num_threads;
339 unsigned int t = 0;
Georgios Pinitas12833d02019-07-25 13:31:10 +0100340 auto thread_it = _impl->_threads.begin();
Anthony Barbier52ecb062018-05-25 13:32:10 +0100341 for(; t < num_threads - 1; ++t, ++thread_it)
342 {
343 info.thread_id = t;
344 thread_it->start(&workloads, feeder, info);
345 }
346
347 info.thread_id = t;
348 process_workloads(workloads, feeder, info);
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000349#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
Anthony Barbier52ecb062018-05-25 13:32:10 +0100350 try
351 {
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000352#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
Georgios Pinitas12833d02019-07-25 13:31:10 +0100353 for(auto &thread : _impl->_threads)
Anthony Barbier52ecb062018-05-25 13:32:10 +0100354 {
355 thread.wait();
356 }
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000357#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
Anthony Barbier52ecb062018-05-25 13:32:10 +0100358 }
359 catch(const std::system_error &e)
360 {
361 std::cerr << "Caught system_error with code " << e.code() << " meaning " << e.what() << '\n';
362 }
Michalis Spyrou323ce0f2018-11-30 16:30:43 +0000363#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
Anthony Barbier52ecb062018-05-25 13:32:10 +0100364}
Vidhya Sudhan Loganathand646ae12018-11-19 15:18:20 +0000365#endif /* DOXYGEN_SKIP_THIS */
Anthony Barbier52ecb062018-05-25 13:32:10 +0100366
Anthony Barbier376c85f2018-05-25 14:17:21 +0100367void CPPScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100368{
369 ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
370
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100371 const Window &max_window = kernel->window();
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100372
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000373 if(hints.split_dimension() == IScheduler::split_dimensions_all)
Moritz Pflanzer2fd5d952017-09-24 12:10:46 +0100374 {
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000375 /*
376 * if the split dim is size_t max then this signals we should parallelise over
377 * all dimensions
378 */
379 const std::size_t m = max_window.num_iterations(Window::DimX);
380 const std::size_t n = max_window.num_iterations(Window::DimY);
Moritz Pflanzer2fd5d952017-09-24 12:10:46 +0100381
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000382 //in c++17 this can be swapped for auto [ m_threads, n_threads ] = split_2d(...
383 unsigned m_threads, n_threads;
384 std::tie(m_threads, n_threads) = split_2d(_impl->_num_threads, m, n);
385
386 std::vector<IScheduler::Workload> workloads;
387 for(unsigned int ni = 0; ni != n_threads; ++ni)
388 {
389 for(unsigned int mi = 0; mi != m_threads; ++mi)
390 {
391 workloads.push_back(
392 [ ni, mi, m_threads, n_threads, &max_window, &kernel ]
393 (const ThreadInfo & info)
394 {
395 //narrow the window to our mi-ni workload
396 Window win = max_window.split_window(Window::DimX, mi, m_threads)
397 .split_window(Window::DimY, ni, n_threads);
398
399 win.validate();
400
401 Window thread_locator;
402 thread_locator.set(Window::DimX, Window::Dimension(mi, m_threads));
403 thread_locator.set(Window::DimY, Window::Dimension(ni, n_threads));
404
405 thread_locator.validate();
406
407 kernel->run_nd(win, info, thread_locator);
408 }
409 );
410 }
411 }
412 run_workloads(workloads);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100413 }
414 else
415 {
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000416 const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
417 const unsigned int num_threads = std::min(num_iterations, _impl->_num_threads);
418
419 if(num_iterations == 0)
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100420 {
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000421 return;
422 }
423
424 if(!kernel->is_parallelisable() || num_threads == 1)
425 {
426 ThreadInfo info;
427 info.cpu_info = &_cpu_info;
428 kernel->run(max_window, info);
429 }
430 else
431 {
432 unsigned int num_windows = 0;
433 switch(hints.strategy())
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100434 {
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000435 case StrategyHint::STATIC:
436 num_windows = num_threads;
437 break;
438 case StrategyHint::DYNAMIC:
439 {
440 const unsigned int granule_threshold = (hints.threshold() <= 0) ? num_threads : static_cast<unsigned int>(hints.threshold());
441 // Make sure we don't use some windows which are too small as this might create some contention on the ThreadFeeder
442 num_windows = num_iterations > granule_threshold ? granule_threshold : num_iterations;
443 break;
444 }
445 default:
446 ARM_COMPUTE_ERROR("Unknown strategy");
Anthony Barbier376c85f2018-05-25 14:17:21 +0100447 }
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000448 std::vector<IScheduler::Workload> workloads(num_windows);
449 for(unsigned int t = 0; t < num_windows; t++)
Anthony Barbier376c85f2018-05-25 14:17:21 +0100450 {
Joseph Dobson6f8b17d2020-02-11 19:32:11 +0000451 //Capture 't' by copy, all the other variables by reference:
452 workloads[t] = [t, &hints, &max_window, &num_windows, &kernel](const ThreadInfo & info)
453 {
454 Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
455 win.validate();
456 kernel->run(win, info);
457 };
458 }
459 run_workloads(workloads);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100460 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100461 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100462}
Moritz Pflanzerff06f202017-09-08 13:48:23 +0100463} // namespace arm_compute