blob: bc138e7e3f6d874ee1b82fa1fe65ca8b1faf014e [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Manuel Bottinibe9f9f92021-01-25 15:07:17 +00002 * Copyright (c) 2016-2021 Arm Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_ICLKERNEL_H
25#define ARM_COMPUTE_ICLKERNEL_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
steniu015f910722017-08-23 10:15:22 +010027#include "arm_compute/core/CL/CLKernelLibrary.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010028#include "arm_compute/core/CL/CLTypes.h"
29#include "arm_compute/core/CL/OpenCL.h"
Michele Di Giorgiob8fc60f2018-04-25 11:58:07 +010030#include "arm_compute/core/GPUTarget.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010031#include "arm_compute/core/IKernel.h"
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010032#include "arm_compute/core/Validate.h"
Michalis Spyrou2aad21a2020-07-02 12:43:53 +010033#include "arm_compute/core/experimental/Types.h"
Manuel Bottinibe9f9f92021-01-25 15:07:17 +000034#include "arm_compute/runtime/CL/CLTuningParams.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035
Giorgio Arenaba2dd822021-07-28 16:10:03 +010036#include "src/core/CL/DefaultLWSHeuristics.h"
37
Gian Marcode691f02017-09-08 16:13:11 +010038#include <string>
39
Anthony Barbier6ff3b192017-09-04 18:44:23 +010040namespace arm_compute
41{
Giorgio Arena4a95bba2021-06-28 11:00:27 +010042namespace
43{
44bool is_same_lws(cl::NDRange lws0, cl::NDRange lws1)
45{
46 if(lws0.dimensions() != lws1.dimensions())
47 {
48 return false;
49 }
50
51 for(size_t i = 0; i < lws0.dimensions(); ++i)
52 {
53 if(lws0.get()[i] != lws1.get()[i])
54 {
55 return false;
56 }
57 }
58
59 return true;
60}
61} // namespace
SiCong Li3e363692017-07-04 15:02:10 +010062template <typename T>
63class ICLArray;
Anthony Barbier6ff3b192017-09-04 18:44:23 +010064class ICLTensor;
65class Window;
66
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
private:
    /** Returns the number of arguments enqueued per array object.
     *
     * @return The number of arguments enqueued per array object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_array()
    {
        // An array is passed to the kernel exactly like a tensor of the same rank.
        return num_arguments_per_tensor<dimension_size>();
    }
    /** Returns the number of arguments enqueued per tensor object.
     *
     * @return The number of arguments enqueued per tensor object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_tensor()
    {
        // Buffer + offset_first_element (2) plus stride and step-stride per dimension (2 each).
        return 2 + 2 * dimension_size;
    }

    /** Compute a default local workgroup size for this kernel type given an execution window. */
    cl::NDRange default_lws_tune(const Window &window)
    {
        return get_default_lws_for_type(_type, gws_from_window(window));
    }

    using IKernel::configure; //Prevent children from calling IKernel::configure() directly
protected:
    /** Configure the kernel's window and local workgroup size hint.
     *
     * @param[in] window    The maximum window which will be returned by window()
     * @param[in] lws_hint  Local-Workgroup-Size to use.
     * @param[in] wbsm_hint (Optional) Workgroup-Batch-Size-Modifier to use.
     */
    void configure_internal(const Window &window, cl::NDRange lws_hint, cl_int wbsm_hint = 0)
    {
        configure_internal(window, CLTuningParams(lws_hint, wbsm_hint));
    }

    /** Configure the kernel's window and tuning parameters hints.
     *
     * If the provided LWS hint is the library default, it is replaced by a
     * per-kernel-type heuristic computed from the execution window.
     *
     * @param[in] window             The maximum window which will be returned by window()
     * @param[in] tuning_params_hint (Optional) Tuning parameters to use.
     */
    void configure_internal(const Window &window, CLTuningParams tuning_params_hint = CLTuningParams(CLKernelLibrary::get().default_ndrange(), 0))
    {
        _tuning_params_hint = tuning_params_hint;

        if(is_same_lws(_tuning_params_hint.get_lws(), CLKernelLibrary::get().default_ndrange()))
        {
            _tuning_params_hint.set_lws(default_lws_tune(window));
        }

        IKernel::configure(window);
    }

public:
    /** Constructor */
    ICLKernel()
        : _kernel(nullptr), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0), _type(CLKernelType::UNKNOWN), _tuning_params_hint()
    {
    }
    /** Returns a reference to the OpenCL kernel of this object.
     *
     * @return A reference to the OpenCL kernel of this object.
     */
    cl::Kernel &kernel()
    {
        return _kernel;
    }
    /** Returns the CL kernel type
     *
     * @return The CL kernel type
     */
    CLKernelType type() const
    {
        return _type;
    }
    /** Add the passed 1D array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T>
    void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
    {
        add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<1>(idx, tensor, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_1D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<2>(idx, tensor, window);
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_2D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<3>(idx, tensor, window);
    }
    /** Add the passed 4D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<4>(idx, tensor, window);
    }

    /** Add the passed NHW 3D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     */
    void add_3d_tensor_nhw_argument(unsigned int &idx, const ICLTensor *tensor);

    /** Returns the number of arguments enqueued per NHW 3D Tensor object.
     *
     * @return The number of arguments enqueued per NHW 3D Tensor object.
     */
    constexpr static unsigned int num_arguments_per_3d_tensor_nhw()
    {
        // Must stay in sync with the arguments set by add_3d_tensor_nhw_argument().
        constexpr unsigned int no_args_per_3d_tensor_nhw = 7u;
        return no_args_per_3d_tensor_nhw;
    }

    /** Add the passed NHWC 4D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     */
    void add_4d_tensor_nhwc_argument(unsigned int &idx, const ICLTensor *tensor);

    /** Returns the number of arguments enqueued per NHWC 4D Tensor object.
     *
     * @return The number of arguments enqueued per NHWC 4D Tensor object.
     */
    constexpr static unsigned int num_arguments_per_4d_tensor_nhwc()
    {
        // Must stay in sync with the arguments set by add_4d_tensor_nhwc_argument().
        constexpr unsigned int no_args_per_4d_tensor_nhwc = 9u;
        return no_args_per_4d_tensor_nhwc;
    }

    /** Returns the number of arguments enqueued per 1D array object.
     *
     * @return The number of arguments enqueued per 1D array object.
     */
    constexpr static unsigned int num_arguments_per_1D_array()
    {
        return num_arguments_per_array<1>();
    }
    /** Returns the number of arguments enqueued per 1D tensor object.
     *
     * @return The number of arguments enqueued per 1D tensor object.
     */
    constexpr static unsigned int num_arguments_per_1D_tensor()
    {
        return num_arguments_per_tensor<1>();
    }
    /** Returns the number of arguments enqueued per 2D tensor object.
     *
     * @return The number of arguments enqueued per 2D tensor object.
     */
    constexpr static unsigned int num_arguments_per_2D_tensor()
    {
        return num_arguments_per_tensor<2>();
    }
    /** Returns the number of arguments enqueued per 3D tensor object.
     *
     * @return The number of arguments enqueued per 3D tensor object.
     */
    constexpr static unsigned int num_arguments_per_3D_tensor()
    {
        return num_arguments_per_tensor<3>();
    }
    /** Returns the number of arguments enqueued per 4D tensor object.
     *
     * @return The number of arguments enqueued per 4D tensor object.
     */
    constexpr static unsigned int num_arguments_per_4D_tensor()
    {
        return num_arguments_per_tensor<4>();
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     * @note The default implementation does nothing; kernels override either this or run_op().
     *
     * @param[in]     window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue  Command queue on which to enqueue the kernel.
     */
    virtual void run(const Window &window, cl::CommandQueue &queue)
    {
        ARM_COMPUTE_UNUSED(window, queue);
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     * @note The default implementation does nothing; kernels override either this or run().
     *
     * @param[in]     tensors A vector containing the tensors to operate on.
     * @param[in]     window  Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue   Command queue on which to enqueue the kernel.
     */
    virtual void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
    {
        ARM_COMPUTE_UNUSED(tensors, window, queue);
    }
    /** Add the passed parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx   Index at which to start adding the arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     value Value to set as an argument of the object's kernel.
     */
    template <typename T>
    void add_argument(unsigned int &idx, T value)
    {
        _kernel.setArg(idx++, value);
    }

    /** Set the Local-Workgroup-Size hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] lws_hint Local-Workgroup-Size to use
     */
    void set_lws_hint(const cl::NDRange &lws_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // lws_hint will be overwritten by configure()
        _tuning_params_hint.set_lws(lws_hint);
    }

    /** Return the Local-Workgroup-Size hint
     *
     * @return Current lws hint
     */
    cl::NDRange lws_hint() const
    {
        return _tuning_params_hint.get_lws();
    }

    /** Set the workgroup batch size modifier hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] wbsm_hint workgroup batch size modifier value
     */
    void set_wbsm_hint(const cl_int &wbsm_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // wbsm_hint will be overwritten by configure()
        _tuning_params_hint.set_wbsm(wbsm_hint);
    }

    /** Return the workgroup batch size modifier hint
     *
     * @return Current wbsm hint
     */
    cl_int wbsm_hint() const
    {
        return _tuning_params_hint.get_wbsm();
    }

    /** Get the configuration ID
     *
     * @note The configuration ID can be used by the caller to distinguish different calls of the same OpenCL kernel
     *       In particular, this method can be used by CLScheduler to keep track of the best LWS for each configuration of the same kernel.
     *       The configuration ID should be provided only for the kernels potentially affected by the LWS geometry
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @return configuration id string
     */
    const std::string &config_id() const
    {
        return _config_id;
    }

    /** Set the targeted GPU architecture
     *
     * @param[in] target The targeted GPU architecture
     */
    void set_target(GPUTarget target)
    {
        _target = target;
    }

    /** Set the targeted GPU architecture according to the CL device
     *
     * @param[in] device A CL device
     */
    void set_target(cl::Device &device);

    /** Get the targeted GPU architecture
     *
     * @return The targeted GPU architecture.
     */
    GPUTarget get_target() const
    {
        return _target;
    }

    /** Get the maximum workgroup size for the device the CLKernelLibrary uses.
     *
     * @return The maximum workgroup size value.
     */
    size_t get_max_workgroup_size();
    /** Get the global work size given an execution window
     *
     * @param[in] window Execution window
     *
     * @return Global work size of the given execution window
     */
    static cl::NDRange gws_from_window(const Window &window);

private:
    /** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T, unsigned int dimension_size>
    void add_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window);
    /** Add the passed tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    template <unsigned int dimension_size>
    void add_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);

protected:
    cl::Kernel   _kernel;             /**< OpenCL kernel to run */
    GPUTarget    _target;             /**< The targeted GPU */
    std::string  _config_id;          /**< Configuration ID */
    size_t       _max_workgroup_size; /**< The maximum workgroup size for this kernel */
    CLKernelType _type;               /**< The CL kernel type */
private:
    CLTuningParams _tuning_params_hint; /**< Tuning parameters hint for the OpenCL kernel */
};
460
461/** Add the kernel to the command queue with the given window.
462 *
463 * @note Depending on the size of the window, this might translate into several jobs being enqueued.
464 *
465 * @note If kernel->kernel() is empty then the function will return without adding anything to the queue.
466 *
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000467 * @param[in,out] queue OpenCL command queue.
468 * @param[in] kernel Kernel to enqueue
469 * @param[in] window Window the kernel has to process.
470 * @param[in] lws_hint (Optional) Local workgroup size requested. Default is based on the device target.
471 * @param[in] use_dummy_work_items (Optional) Use dummy work items in order to have two dimensional power of two NDRange. Default is false
472 * Note: it is kernel responsibility to check if the work-item is out-of-range
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100473 *
474 * @note If any dimension of the lws is greater than the global workgroup size then no lws will be passed.
475 */
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000476void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint = CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items = false);
SiCong Li3e363692017-07-04 15:02:10 +0100477
Alex Gildayc357c472018-03-21 13:54:09 +0000478/** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
479 *
480 * @param[in,out] idx Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
481 * @param[in] array Array to set as an argument of the object's kernel.
482 * @param[in] strides @ref Strides object containing stride of each dimension in bytes.
483 * @param[in] num_dimensions Number of dimensions of the @p array.
484 * @param[in] window Window the kernel will be executed on.
485 */
SiCong Li3e363692017-07-04 15:02:10 +0100486template <typename T, unsigned int dimension_size>
487void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
488{
Diego Lopez Recas0021d752017-12-18 14:42:56 +0000489 ARM_COMPUTE_ERROR_ON(array == nullptr);
490
SiCong Li3e363692017-07-04 15:02:10 +0100491 // Calculate offset to the start of the window
492 unsigned int offset_first_element = 0;
493
494 for(unsigned int n = 0; n < num_dimensions; ++n)
495 {
496 offset_first_element += window[n].start() * strides[n];
497 }
498
499 unsigned int idx_start = idx;
500 _kernel.setArg(idx++, array->cl_buffer());
501
502 for(unsigned int dimension = 0; dimension < dimension_size; dimension++)
503 {
504 _kernel.setArg<cl_uint>(idx++, strides[dimension]);
505 _kernel.setArg<cl_uint>(idx++, strides[dimension] * window[dimension].step());
506 }
507
508 _kernel.setArg<cl_uint>(idx++, offset_first_element);
509
Michalis Spyrou7c60c992019-10-10 14:33:47 +0100510 ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_array<dimension_size>() != idx,
511 "add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_array<dimension_size>());
SiCong Li3e363692017-07-04 15:02:10 +0100512 ARM_COMPUTE_UNUSED(idx_start);
513}
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100514}
Michalis Spyrouf4643372019-11-29 16:17:13 +0000515#endif /*ARM_COMPUTE_ICLKERNEL_H */