blob: c82809cef35930fbbb8169c8aa08c17d82026d2e [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
SiCong Li47f177e2023-02-22 17:24:09 +00002 * Copyright (c) 2016-2023 Arm Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_ICLKERNEL_H
25#define ARM_COMPUTE_ICLKERNEL_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
steniu015f910722017-08-23 10:15:22 +010027#include "arm_compute/core/CL/CLKernelLibrary.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010028#include "arm_compute/core/CL/CLTypes.h"
29#include "arm_compute/core/CL/OpenCL.h"
Michele Di Giorgiob8fc60f2018-04-25 11:58:07 +010030#include "arm_compute/core/GPUTarget.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010031#include "arm_compute/core/IKernel.h"
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010032#include "arm_compute/core/Validate.h"
Michalis Spyrou2aad21a2020-07-02 12:43:53 +010033#include "arm_compute/core/experimental/Types.h"
Manuel Bottinibe9f9f92021-01-25 15:07:17 +000034#include "arm_compute/runtime/CL/CLTuningParams.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035
Giorgio Arenaba2dd822021-07-28 16:10:03 +010036#include "src/core/CL/DefaultLWSHeuristics.h"
37
Gian Marcode691f02017-09-08 16:13:11 +010038#include <string>
39
Anthony Barbier6ff3b192017-09-04 18:44:23 +010040namespace arm_compute
41{
Giorgio Arena4a95bba2021-06-28 11:00:27 +010042namespace
43{
44bool is_same_lws(cl::NDRange lws0, cl::NDRange lws1)
45{
46 if(lws0.dimensions() != lws1.dimensions())
47 {
48 return false;
49 }
50
51 for(size_t i = 0; i < lws0.dimensions(); ++i)
52 {
53 if(lws0.get()[i] != lws1.get()[i])
54 {
55 return false;
56 }
57 }
58
59 return true;
60}
61} // namespace
SiCong Li3e363692017-07-04 15:02:10 +010062template <typename T>
63class ICLArray;
Anthony Barbier6ff3b192017-09-04 18:44:23 +010064class ICLTensor;
65class Window;
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
private:
    /** Returns the number of arguments enqueued per array object.
     *
     * @return The number of arguments enqueued per array object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_array()
    {
        // Arrays use the same kernel-argument layout as tensors.
        return num_arguments_per_tensor<dimension_size>();
    }
    /** Returns the number of arguments enqueued per tensor object.
     *
     * @return The number of arguments enqueued per tensor object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_tensor()
    {
        // One buffer argument and one offset-to-first-element argument, plus a
        // (stride, stride * step) pair per dimension (see add_array_argument()).
        return 2 + 2 * dimension_size;
    }

    /** Get default lws for the kernel
     *
     * @param[in] window               Execution window used by the kernel
     * @param[in] use_dummy_work_items If the kernel uses dummy workloads
     *
     * @return cl::NDRange with the default LWS heuristic for this kernel type
     */
    cl::NDRange default_lws_tune(const Window &window, bool use_dummy_work_items)
    {
        return get_default_lws_for_type(_type, gws_from_window(window, use_dummy_work_items));
    }

    using IKernel::configure; //Prevent children from calling IKernel::configure() directly
protected:
    /** Configure the kernel's window and local workgroup size hint.
     *
     * @param[in] window    The maximum window which will be returned by window()
     * @param[in] lws_hint  Local-Workgroup-Size to use.
     * @param[in] wbsm_hint (Optional) Workgroup-Batch-Size-Modifier to use.
     */
    void configure_internal(const Window &window, cl::NDRange lws_hint, cl_int wbsm_hint = 0)
    {
        configure_internal(window, CLTuningParams(lws_hint, wbsm_hint));
    }

    /** Configure the kernel's window and tuning parameters hints.
     *
     * @param[in] window             The maximum window which will be returned by window()
     * @param[in] tuning_params_hint (Optional) Tuning parameters to use.
     */
    void configure_internal(const Window &window, CLTuningParams tuning_params_hint = CLTuningParams(CLKernelLibrary::get().default_ndrange(), 0))
    {
        _tuning_params_hint = tuning_params_hint;

        // Only apply the LWS heuristic when the caller passed no explicit LWS hint
        // (i.e. the hint still equals the library default).
        if(is_same_lws(_tuning_params_hint.get_lws(), CLKernelLibrary::get().default_ndrange()))
        {
            // Disable use_dummy_work_items at configure time. Because dummy work items only affect gws size, which
            // will be recalculated with use_dummy_work_items flag at run time again anyway.
            _tuning_params_hint.set_lws(default_lws_tune(window, false /* use_dummy_work_items */));
        }

        IKernel::configure(window);
    }

public:
    /** Constructor */
    ICLKernel()
        : _kernel(nullptr), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0), _type(CLKernelType::UNKNOWN), _tuning_params_hint(), _cached_gws(cl::NullRange)
    {
    }
    /** Returns a reference to the OpenCL kernel of this object.
     *
     * @return A reference to the OpenCL kernel of this object.
     */
    cl::Kernel &kernel()
    {
        return _kernel;
    }
    /** Returns the CL kernel type
     *
     * @return The CL kernel type
     */
    CLKernelType type() const
    {
        return _type;
    }
    /** Add the passed 1D array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T>
    void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
    {
        add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<1>(idx, tensor, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_1D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<2>(idx, tensor, window);
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_2D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<3>(idx, tensor, window);
    }
    /** Add the passed 4D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<4>(idx, tensor, window);
    }
    /** Add the passed 5D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_5D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<5>(idx, tensor, window);
    }

    /** Add the passed NHW 3D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     */
    void add_3d_tensor_nhw_argument(unsigned int &idx, const ICLTensor *tensor);

    /** Returns the number of arguments enqueued per NHW 3D Tensor object.
     *
     * @return The number of arguments enqueued per NHW 3D Tensor object.
     */
    constexpr static unsigned int num_arguments_per_3d_tensor_nhw()
    {
        // Must match the argument count set by add_3d_tensor_nhw_argument().
        constexpr unsigned int no_args_per_3d_tensor_nhw = 7u;
        return no_args_per_3d_tensor_nhw;
    }

    /** Add the passed NHWC 4D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     */
    void add_4d_tensor_nhwc_argument(unsigned int &idx, const ICLTensor *tensor);

    /** Returns the number of arguments enqueued per NHWC 4D Tensor object.
     *
     * @return The number of arguments enqueued per NHWC 4D Tensor object.
     */
    constexpr static unsigned int num_arguments_per_4d_tensor_nhwc()
    {
        // Must match the argument count set by add_4d_tensor_nhwc_argument().
        constexpr unsigned int no_args_per_4d_tensor_nhwc = 9u;
        return no_args_per_4d_tensor_nhwc;
    }

    /** Returns the number of arguments enqueued per 1D array object.
     *
     * @return The number of arguments enqueued per 1D array object.
     */
    constexpr static unsigned int num_arguments_per_1D_array()
    {
        return num_arguments_per_array<1>();
    }
    /** Returns the number of arguments enqueued per 1D tensor object.
     *
     * @return The number of arguments enqueued per 1D tensor object.
     */
    constexpr static unsigned int num_arguments_per_1D_tensor()
    {
        return num_arguments_per_tensor<1>();
    }
    /** Returns the number of arguments enqueued per 2D tensor object.
     *
     * @return The number of arguments enqueued per 2D tensor object.
     */
    constexpr static unsigned int num_arguments_per_2D_tensor()
    {
        return num_arguments_per_tensor<2>();
    }
    /** Returns the number of arguments enqueued per 3D tensor object.
     *
     * @return The number of arguments enqueued per 3D tensor object.
     */
    constexpr static unsigned int num_arguments_per_3D_tensor()
    {
        return num_arguments_per_tensor<3>();
    }
    /** Returns the number of arguments enqueued per 4D tensor object.
     *
     * @return The number of arguments enqueued per 4D tensor object.
     */
    constexpr static unsigned int num_arguments_per_4D_tensor()
    {
        return num_arguments_per_tensor<4>();
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @param[in]     window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue  Command queue on which to enqueue the kernel.
     */
    virtual void run(const Window &window, cl::CommandQueue &queue)
    {
        // Default implementation is a no-op: derived kernels override run() or run_op().
        ARM_COMPUTE_UNUSED(window, queue);
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @param[in]     tensors A vector containing the tensors to operate on.
     * @param[in]     window  Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue   Command queue on which to enqueue the kernel.
     */
    virtual void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
    {
        // Default implementation is a no-op: derived kernels override run() or run_op().
        ARM_COMPUTE_UNUSED(tensors, window, queue);
    }
    /** Add the passed parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx   Index at which to start adding the arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     value Value to set as an argument of the object's kernel.
     */
    template <typename T>
    void add_argument(unsigned int &idx, T value)
    {
        _kernel.setArg(idx++, value);
    }

    /** Set the Local-Workgroup-Size hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] lws_hint Local-Workgroup-Size to use
     */
    void set_lws_hint(const cl::NDRange &lws_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // lws_hint will be overwritten by configure()
        _tuning_params_hint.set_lws(lws_hint);
    }

    /** Return the Local-Workgroup-Size hint
     *
     * @return Current lws hint
     */
    cl::NDRange lws_hint() const
    {
        return _tuning_params_hint.get_lws();
    }

    /** Set the workgroup batch size modifier hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] wbsm_hint workgroup batch size modifier value
     */
    void set_wbsm_hint(const cl_int &wbsm_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // wbsm_hint will be overwritten by configure()
        _tuning_params_hint.set_wbsm(wbsm_hint);
    }

    /** Return the workgroup batch size modifier hint
     *
     * @return Current wbsm hint
     */
    cl_int wbsm_hint() const
    {
        return _tuning_params_hint.get_wbsm();
    }

    /** Get the configuration ID
     *
     * @note The configuration ID can be used by the caller to distinguish different calls of the same OpenCL kernel
     *       In particular, this method can be used by CLScheduler to keep track of the best LWS for each configuration of the same kernel.
     *       The configuration ID should be provided only for the kernels potentially affected by the LWS geometry
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @return configuration id string
     */
    const std::string &config_id() const
    {
        return _config_id;
    }

    /** Set the targeted GPU architecture
     *
     * @param[in] target The targeted GPU architecture
     */
    void set_target(GPUTarget target)
    {
        _target = target;
    }

    /** Set the targeted GPU architecture according to the CL device
     *
     * @param[in] device A CL device
     */
    void set_target(cl::Device &device);

    /** Get the targeted GPU architecture
     *
     * @return The targeted GPU architecture.
     */
    GPUTarget get_target() const
    {
        return _target;
    }

    /** Get the maximum workgroup size for the device the CLKernelLibrary uses.
     *
     * @return The maximum workgroup size value.
     */
    size_t get_max_workgroup_size();
    /** Get the global work size given an execution window
     *
     * @param[in] window               Execution window
     * @param[in] use_dummy_work_items If the kernel uses dummy work items
     *
     * @return Global work size of the given execution window
     */
    static cl::NDRange gws_from_window(const Window &window, bool use_dummy_work_items);

    /** Get the cached gws used to enqueue this kernel
     *
     * @return Latest global work size of the kernel
     */
    cl::NDRange get_cached_gws() const;

    /** Cache the latest gws used to enqueue this kernel
     *
     * @param[in] gws Latest global work size of the kernel
     */
    void cache_gws(const cl::NDRange &gws);

private:
    /** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T, unsigned int dimension_size>
    void add_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window);
    /** Add the passed tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    template <unsigned int dimension_size>
    void add_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);

protected:
    cl::Kernel  _kernel;             /**< OpenCL kernel to run */
    GPUTarget   _target;             /**< The targeted GPU */
    std::string _config_id;          /**< Configuration ID */
    size_t      _max_workgroup_size; /**< The maximum workgroup size for this kernel */
    CLKernelType _type;              /**< The CL kernel type */
private:
    CLTuningParams _tuning_params_hint; /**< Tuning parameters hint for the OpenCL kernel */
    cl::NDRange    _cached_gws;         /**< Latest GWS used to enqueue this kernel */
};
492
493/** Add the kernel to the command queue with the given window.
494 *
495 * @note Depending on the size of the window, this might translate into several jobs being enqueued.
496 *
497 * @note If kernel->kernel() is empty then the function will return without adding anything to the queue.
498 *
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000499 * @param[in,out] queue OpenCL command queue.
500 * @param[in] kernel Kernel to enqueue
501 * @param[in] window Window the kernel has to process.
502 * @param[in] lws_hint (Optional) Local workgroup size requested. Default is based on the device target.
503 * @param[in] use_dummy_work_items (Optional) Use dummy work items in order to have two dimensional power of two NDRange. Default is false
504 * Note: it is kernel responsibility to check if the work-item is out-of-range
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100505 *
506 * @note If any dimension of the lws is greater than the global workgroup size then no lws will be passed.
507 */
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000508void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint = CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items = false);
SiCong Li3e363692017-07-04 15:02:10 +0100509
Alex Gildayc357c472018-03-21 13:54:09 +0000510/** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
511 *
512 * @param[in,out] idx Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
513 * @param[in] array Array to set as an argument of the object's kernel.
514 * @param[in] strides @ref Strides object containing stride of each dimension in bytes.
515 * @param[in] num_dimensions Number of dimensions of the @p array.
516 * @param[in] window Window the kernel will be executed on.
517 */
SiCong Li3e363692017-07-04 15:02:10 +0100518template <typename T, unsigned int dimension_size>
519void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
520{
Diego Lopez Recas0021d752017-12-18 14:42:56 +0000521 ARM_COMPUTE_ERROR_ON(array == nullptr);
522
SiCong Li3e363692017-07-04 15:02:10 +0100523 // Calculate offset to the start of the window
524 unsigned int offset_first_element = 0;
525
526 for(unsigned int n = 0; n < num_dimensions; ++n)
527 {
528 offset_first_element += window[n].start() * strides[n];
529 }
530
531 unsigned int idx_start = idx;
532 _kernel.setArg(idx++, array->cl_buffer());
533
534 for(unsigned int dimension = 0; dimension < dimension_size; dimension++)
535 {
536 _kernel.setArg<cl_uint>(idx++, strides[dimension]);
537 _kernel.setArg<cl_uint>(idx++, strides[dimension] * window[dimension].step());
538 }
539
540 _kernel.setArg<cl_uint>(idx++, offset_first_element);
541
Michalis Spyrou7c60c992019-10-10 14:33:47 +0100542 ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_array<dimension_size>() != idx,
543 "add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_array<dimension_size>());
SiCong Li3e363692017-07-04 15:02:10 +0100544 ARM_COMPUTE_UNUSED(idx_start);
545}
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100546}
Michalis Spyrouf4643372019-11-29 16:17:13 +0000547#endif /*ARM_COMPUTE_ICLKERNEL_H */