/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/IScheduler.h"

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Window.h"
#include "src/runtime/CPUUtils.h"
#include "src/runtime/SchedulerUtils.h"

namespace arm_compute
{
IScheduler::IScheduler()
    : _cpu_info()
{
    utils::cpu::get_cpu_configuration(_cpu_info);
    // Work out the best possible number of execution threads
    _num_threads_hint = utils::cpu::get_threads_hint();
}

CPUInfo &IScheduler::cpu_info()
{
    return _cpu_info;
}

void IScheduler::set_num_threads_with_affinity(unsigned int num_threads, BindFunc func)
{
    ARM_COMPUTE_UNUSED(num_threads, func);
    ARM_COMPUTE_ERROR("Feature for affinity setting is not implemented");
}

unsigned int IScheduler::num_threads_hint() const
{
    return _num_threads_hint;
}

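// Common scheduling path shared by the concrete schedulers: split the kernel's
// maximum window into per-thread workloads according to the hints, then hand
// the workloads to the backend-specific run_workloads().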
void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors)
{
    ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
    ARM_COMPUTE_UNUSED(kernel);
    ARM_COMPUTE_UNUSED(hints);
    ARM_COMPUTE_UNUSED(tensors);
#ifndef BARE_METAL
    const Window &max_window = kernel->window();
    if(hints.split_dimension() == IScheduler::split_dimensions_all)
    {
        /*
         * A split dimension of size_t max (split_dimensions_all) signals that we
         * should parallelise over all dimensions.
         */
        const std::size_t m = max_window.num_iterations(Window::DimX);
        const std::size_t n = max_window.num_iterations(Window::DimY);

        // In C++17 this can be replaced with: auto [m_threads, n_threads] = split_2d(...)
        unsigned int m_threads, n_threads;
        std::tie(m_threads, n_threads) = scheduler_utils::split_2d(this->num_threads(), m, n);

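        // Create one workload per (mi, ni) cell of the m_threads x n_threads grid;
        // each workload runs the kernel on its own 2D slice of the maximum window.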
        std::vector<IScheduler::Workload> workloads;
        for(unsigned int ni = 0; ni != n_threads; ++ni)
        {
            for(unsigned int mi = 0; mi != m_threads; ++mi)
            {
                workloads.push_back(
                    [ni, mi, m_threads, n_threads, &max_window, &kernel](const ThreadInfo & info)
                {
                    // Narrow the window to our (mi, ni) workload
                    Window win = max_window.split_window(Window::DimX, mi, m_threads)
                                 .split_window(Window::DimY, ni, n_threads);

                    win.validate();

                    Window thread_locator;
                    thread_locator.set(Window::DimX, Window::Dimension(mi, m_threads));
                    thread_locator.set(Window::DimY, Window::Dimension(ni, n_threads));

                    thread_locator.validate();

                    kernel->run_nd(win, info, thread_locator);
                });
            }
        }
        run_workloads(workloads);
    }
    else
    {
        const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
        const unsigned int num_threads    = std::min(num_iterations, this->num_threads());

        if(num_iterations == 0)
        {
            return;
        }

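        // Run on the caller's thread when the kernel cannot be parallelised
        // or only a single thread is available.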
        if(!kernel->is_parallelisable() || num_threads == 1)
        {
            ThreadInfo info;
            info.cpu_info = &_cpu_info;
            if(tensors.empty())
            {
                kernel->run(max_window, info);
            }
            else
            {
                kernel->run_op(tensors, max_window, info);
            }
        }
        else
        {
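            // Decide how many workloads to create from the scheduling strategy:
            // STATIC uses exactly num_threads windows, DYNAMIC caps the count at
            // the granule threshold (and at num_iterations) so windows don't get too small.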
            unsigned int num_windows = 0;
            switch(hints.strategy())
            {
                case StrategyHint::STATIC:
                    num_windows = num_threads;
                    break;
                case StrategyHint::DYNAMIC:
                {
                    const unsigned int granule_threshold = (hints.threshold() <= 0) ? num_threads : static_cast<unsigned int>(hints.threshold());
                    // Make sure we don't use some windows which are too small as this might create some contention on the ThreadFeeder
                    num_windows = num_iterations > granule_threshold ? granule_threshold : num_iterations;
                    break;
                }
                default:
                    ARM_COMPUTE_ERROR("Unknown strategy");
            }
            std::vector<IScheduler::Workload> workloads(num_windows);
            for(unsigned int t = 0; t < num_windows; ++t)
            {
                // Capture 't' by copy, all the other variables by reference:
                workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo & info)
                {
                    Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
                    win.validate();

                    if(tensors.empty())
                    {
                        kernel->run(win, info);
                    }
                    else
                    {
                        kernel->run_op(tensors, win, info);
                    }
                };
            }
            run_workloads(workloads);
        }
    }
#endif /* !BARE_METAL */
}

void IScheduler::run_tagged_workloads(std::vector<Workload> &workloads, const char *tag)
{
    ARM_COMPUTE_UNUSED(tag);
    run_workloads(workloads);
}

} // namespace arm_compute