/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_ISCHEDULER_H
#define ARM_COMPUTE_ISCHEDULER_H

#include "arm_compute/core/CPP/CPPTypes.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/experimental/Types.h"

#include <functional>
#include <limits>

namespace arm_compute
{
class ICPPKernel;
class ITensor;

/** Scheduler interface to run kernels */
class IScheduler
{
public:
    /** Strategies available to split a workload */
    enum class StrategyHint
    {
        STATIC,  /**< Split the workload evenly among the threads */
        DYNAMIC, /**< Split the workload dynamically using a bucket system */
    };

    /** When arm_compute::IScheduler::Hints::_split_dimension is initialized with this value
     * then the scheduler is free to break down the problem space over as many dimensions
     * as it wishes
     */
    static constexpr unsigned int split_dimensions_all = std::numeric_limits<unsigned>::max();
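
    // Illustrative only, not part of the interface: a function that wants the scheduler
    // to parallelise over every dimension of the kernel's window could pass this constant
    // as the split dimension. Scheduler::get() is the usual access point to the process-wide
    // IScheduler; `kernel` stands for a hypothetical pointer to an already-configured kernel.
    //
    //     Scheduler::get().schedule(kernel, IScheduler::Hints(IScheduler::split_dimensions_all));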

    /** Scheduler hints
     *
     * Collection of preferences set by the function regarding how to split a given workload
     */
    class Hints
    {
    public:
        /** Constructor
         *
         * @param[in] split_dimension Dimension along which to split the kernel's execution window.
         * @param[in] strategy        (Optional) Split strategy.
         * @param[in] threshold       (Optional) Dynamic scheduling capping threshold.
         */
        Hints(unsigned int split_dimension, StrategyHint strategy = StrategyHint::STATIC, int threshold = 0)
            : _split_dimension(split_dimension), _strategy(strategy), _threshold(threshold)
        {
        }
        /** Set the split_dimension hint
         *
         * @param[in] split_dimension Dimension along which to split the kernel's execution window.
         *
         * @return the Hints object
         */
        Hints &set_split_dimension(unsigned int split_dimension)
        {
            _split_dimension = split_dimension;
            return *this;
        }
        /** Return the preferred split dimension
         *
         * @return The split dimension
         */
        unsigned int split_dimension() const
        {
            return _split_dimension;
        }

        /** Set the strategy hint
         *
         * @param[in] strategy Preferred strategy to use to split the workload
         *
         * @return the Hints object
         */
        Hints &set_strategy(StrategyHint strategy)
        {
            _strategy = strategy;
            return *this;
        }
        /** Return the preferred strategy to use to split the workload.
         *
         * @return The strategy
         */
        StrategyHint strategy() const
        {
            return _strategy;
        }
        /** Return the granule capping threshold to be used by dynamic scheduling.
         *
         * @return The capping threshold
         */
        int threshold() const
        {
            return _threshold;
        }

    private:
        unsigned int _split_dimension;
        StrategyHint _strategy;
        int          _threshold;
    };
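
    // A minimal usage sketch (the kernel pointer, the choice of Window::DimY and the use of
    // the global Scheduler::get() accessor are illustrative assumptions, not requirements):
    //
    //     IScheduler::Hints hints(Window::DimY);
    //     hints.set_strategy(IScheduler::StrategyHint::DYNAMIC);
    //     Scheduler::get().schedule(kernel, hints);
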
    /** Signature for the workloads to execute */
    using Workload = std::function<void(const ThreadInfo &)>;
    /** Default constructor. */
    IScheduler();

    /** Destructor. */
    virtual ~IScheduler() = default;

    /** Sets the number of threads the scheduler will use to run the kernels.
     *
     * @param[in] num_threads If set to 0, one thread per CPU core available on the system will be used; otherwise, the specified number of threads will be used.
     */
    virtual void set_num_threads(unsigned int num_threads) = 0;
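
    // For example (a sketch against the global scheduler, not a requirement of this interface):
    //
    //     Scheduler::get().set_num_threads(4); // pin the pool to four worker threads
    //     Scheduler::get().set_num_threads(0); // one thread per available CPU core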

    /** Returns the number of threads that the scheduler has in its pool.
     *
     * @return Number of threads available in the scheduler's pool.
     */
    virtual unsigned int num_threads() const = 0;

    /** Runs the kernel in the same thread as the caller synchronously.
     *
     * @param[in] kernel Kernel to execute.
     * @param[in] hints  Hints for the scheduler.
     */
    virtual void schedule(ICPPKernel *kernel, const Hints &hints) = 0;
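
    // Typical call site, as a hedged sketch: a function implementation forwarding its configured
    // kernel to the global scheduler. `_kernel` is a hypothetical member; the unsigned split
    // dimension converts implicitly to a Hints object with the default STATIC strategy.
    //
    //     Scheduler::get().schedule(_kernel.get(), Window::DimY);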

    /** Runs the kernel in the same thread as the caller synchronously.
     *
     * @param[in] kernel  Kernel to execute.
     * @param[in] hints   Hints for the scheduler.
     * @param[in] inputs  Map containing the input tensors.
     * @param[in] outputs Map containing the output tensors.
     */
    virtual void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) = 0;
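
    // A hedged sketch of a call, assuming InputTensorMap/OutputTensorMap (declared in
    // arm_compute/core/experimental/Types.h) map ACL_SRC/ACL_DST slot ids to ITensor pointers,
    // and that `src`, `dst` and `_kernel` are valid, already-configured objects:
    //
    //     InputTensorMap  inputs  = { { TensorType::ACL_SRC, src } };
    //     OutputTensorMap outputs = { { TensorType::ACL_DST, dst } };
    //     Scheduler::get().schedule_op(_kernel.get(), IScheduler::Hints(Window::DimY), inputs, outputs);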

    /** Execute all the passed workloads
     *
     * @note there is no guarantee regarding the order in which the workloads will be executed or whether or not they will be executed in parallel.
     *
     * @param[in] workloads Array of workloads to run
     * @param[in] tag       String that can be used by profiling tools to identify the workloads run by the scheduler (Can be null).
     */
    virtual void run_tagged_workloads(std::vector<Workload> &workloads, const char *tag);
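
    // Sketch of building and running tagged workloads (the lambda body, `process_chunk`
    // and the tag string are illustrative assumptions):
    //
    //     std::vector<IScheduler::Workload> workloads;
    //     workloads.emplace_back([&](const ThreadInfo &info) { process_chunk(info.thread_id); });
    //     workloads.emplace_back([&](const ThreadInfo &info) { process_chunk(info.thread_id); });
    //     Scheduler::get().run_tagged_workloads(workloads, "my_operator");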

    /** Get CPU info.
     *
     * @return CPU info.
     */
    CPUInfo &cpu_info();
    /** Get a hint for the best possible number of execution threads
     *
     * @warning If the best number of threads can't be worked out, std::thread::hardware_concurrency()
     * is returned (or 1 for bare metal builds).
     *
     * @return Best possible number of execution threads to use
     */
    unsigned int num_threads_hint() const;
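
    // One plausible use of this hint inside a concrete scheduler (a sketch, not mandated
    // by the interface): seed the thread pool size at construction time.
    //
    //     set_num_threads(num_threads_hint());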

protected:
    /** Execute all the passed workloads
     *
     * @note there is no guarantee regarding the order in which the workloads will be executed or whether or not they will be executed in parallel.
     *
     * @param[in] workloads Array of workloads to run
     */
    virtual void run_workloads(std::vector<Workload> &workloads) = 0;
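
    // A minimal single-threaded sketch of what an implementation could look like, assuming
    // ThreadInfo (from CPPTypes.h) exposes thread_id, num_threads and cpu_info:
    //
    //     void run_workloads(std::vector<Workload> &workloads) override
    //     {
    //         ThreadInfo info;
    //         info.cpu_info    = &_cpu_info;
    //         info.num_threads = 1;
    //         for(Workload &workload : workloads)
    //         {
    //             workload(info);
    //         }
    //     }
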
    CPUInfo _cpu_info;

private:
    unsigned int _num_threads_hint = {};
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_ISCHEDULER_H */