blob: a5e20ee627bbef8f815480dd93a240c9805a9729 [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Joseph Dobson6f8b17d2020-02-11 19:32:11 +00002 * Copyright (c) 2017-2020 ARM Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_ISCHEDULER_H
25#define ARM_COMPUTE_ISCHEDULER_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
Moritz Pflanzerc186b572017-09-07 09:48:04 +010027#include "arm_compute/core/CPP/CPPTypes.h"
28
Anthony Barbier52ecb062018-05-25 13:32:10 +010029#include <functional>
Joseph Dobson6f8b17d2020-02-11 19:32:11 +000030#include <limits>
Anthony Barbier52ecb062018-05-25 13:32:10 +010031
Anthony Barbier6ff3b192017-09-04 18:44:23 +010032namespace arm_compute
33{
34class ICPPKernel;
35
/** Scheduler interface to run kernels */
class IScheduler
{
public:
    /** Strategies available to split a workload */
    enum class StrategyHint
    {
        STATIC,  /**< Split the workload evenly among the threads */
        DYNAMIC, /**< Split the workload dynamically using a bucket system */
    };

    /** When arm_compute::IScheduler::Hints::_split_dimension is initialized with this value
     * then the scheduler is free to break down the problem space over as many dimensions
     * as it wishes
     */
    static constexpr unsigned int split_dimensions_all = std::numeric_limits<unsigned>::max();

    /** Scheduler hints
     *
     * Collection of preferences set by the function regarding how to split a given workload
     */
    class Hints
    {
    public:
        /** Constructor
         *
         * @param[in] split_dimension Dimension along which to split the kernel's execution window.
         * @param[in] strategy        (Optional) Split strategy.
         * @param[in] threshold       (Optional) Dynamic scheduling capping threshold.
         */
        Hints(unsigned int split_dimension, StrategyHint strategy = StrategyHint::STATIC, int threshold = 0)
            : _split_dimension(split_dimension), _strategy(strategy), _threshold(threshold)
        {
        }
        /** Set the split_dimension hint
         *
         * @param[in] split_dimension Dimension along which to split the kernel's execution window.
         *
         * @return the Hints object
         */
        Hints &set_split_dimension(unsigned int split_dimension)
        {
            _split_dimension = split_dimension;
            return *this;
        }
        /** Return the preferred split dimension
         *
         * @return The split dimension
         */
        unsigned int split_dimension() const
        {
            return _split_dimension;
        }

        /** Set the strategy hint
         *
         * @param[in] strategy Preferred strategy to use to split the workload
         *
         * @return the Hints object
         */
        Hints &set_strategy(StrategyHint strategy)
        {
            _strategy = strategy;
            return *this;
        }
        /** Return the preferred strategy to use to split workload.
         *
         * @return The strategy
         */
        StrategyHint strategy() const
        {
            return _strategy;
        }
        /** Return the granule capping threshold to be used by dynamic scheduling.
         *
         * @return The capping threshold
         */
        int threshold() const
        {
            return _threshold;
        }

    private:
        unsigned int _split_dimension; /**< Dimension along which to split the kernel's execution window */
        StrategyHint _strategy;        /**< Preferred strategy to use to split the workload */
        int          _threshold;       /**< Capping threshold for dynamic scheduling */
    };
    /** Signature for the workloads to execute */
    using Workload = std::function<void(const ThreadInfo &)>;
    /** Default constructor. */
    IScheduler();

    /** Destructor. */
    virtual ~IScheduler() = default;

    /** Sets the number of threads the scheduler will use to run the kernels.
     *
     * @param[in] num_threads If set to 0, then one thread per CPU core available on the system will be used, otherwise the number of threads specified.
     */
    virtual void set_num_threads(unsigned int num_threads) = 0;

    /** Returns the number of threads that the scheduler has in its pool.
     *
     * @return Number of threads available in the scheduler's pool.
     */
    virtual unsigned int num_threads() const = 0;

    /** Runs the kernel in the same thread as the caller synchronously.
     *
     * @param[in] kernel Kernel to execute.
     * @param[in] hints  Hints for the scheduler.
     */
    virtual void schedule(ICPPKernel *kernel, const Hints &hints) = 0;

    /** Execute all the passed workloads
     *
     * @note there is no guarantee regarding the order in which the workloads will be executed or whether or not they will be executed in parallel.
     *
     * @param[in] workloads Array of workloads to run
     * @param[in] tag       String that can be used by profiling tools to identify the workloads run by the scheduler (Can be null).
     */
    virtual void run_tagged_workloads(std::vector<Workload> &workloads, const char *tag);

    /** Get CPU info.
     *
     * @return CPU info.
     */
    CPUInfo &cpu_info();
    /** Get a hint for the best possible number of execution threads
     *
     * @warning In case we can't work out the best number of threads,
     * std::thread::hardware_concurrency() is returned else 1 in case of bare metal builds
     *
     * @return Best possible number of execution threads to use
     */
    unsigned int num_threads_hint() const;

protected:
    /** Execute all the passed workloads
     *
     * @note there is no guarantee regarding the order in which the workloads will be executed or whether or not they will be executed in parallel.
     *
     * @param[in] workloads Array of workloads to run
     */
    virtual void run_workloads(std::vector<Workload> &workloads) = 0;
    CPUInfo _cpu_info; /**< CPU information for this scheduler (exposed via cpu_info()) */

private:
    unsigned int _num_threads_hint = {}; /**< Hint for the number of execution threads (returned by num_threads_hint()) */
};
Georgios Pinitas77d42522019-11-05 13:35:47 +0000186} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000187#endif /* ARM_COMPUTE_ISCHEDULER_H */