blob: 3b3217d1d8d8ea8713c790e26d55f5a0129350ce [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Manuel Bottinibe9f9f92021-01-25 15:07:17 +00002 * Copyright (c) 2016-2021 Arm Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_ICLKERNEL_H
25#define ARM_COMPUTE_ICLKERNEL_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
steniu015f910722017-08-23 10:15:22 +010027#include "arm_compute/core/CL/CLKernelLibrary.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010028#include "arm_compute/core/CL/CLTypes.h"
29#include "arm_compute/core/CL/OpenCL.h"
Michele Di Giorgiob8fc60f2018-04-25 11:58:07 +010030#include "arm_compute/core/GPUTarget.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010031#include "arm_compute/core/IKernel.h"
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010032#include "arm_compute/core/Validate.h"
Michalis Spyrou2aad21a2020-07-02 12:43:53 +010033#include "arm_compute/core/experimental/Types.h"
Manuel Bottinibe9f9f92021-01-25 15:07:17 +000034#include "arm_compute/runtime/CL/CLTuningParams.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035
Giorgio Arenaba2dd822021-07-28 16:10:03 +010036#include "src/core/CL/DefaultLWSHeuristics.h"
37
Gian Marcode691f02017-09-08 16:13:11 +010038#include <string>
39
Anthony Barbier6ff3b192017-09-04 18:44:23 +010040namespace arm_compute
41{
Giorgio Arena4a95bba2021-06-28 11:00:27 +010042namespace
43{
44bool is_same_lws(cl::NDRange lws0, cl::NDRange lws1)
45{
46 if(lws0.dimensions() != lws1.dimensions())
47 {
48 return false;
49 }
50
51 for(size_t i = 0; i < lws0.dimensions(); ++i)
52 {
53 if(lws0.get()[i] != lws1.get()[i])
54 {
55 return false;
56 }
57 }
58
59 return true;
60}
61} // namespace
SiCong Li3e363692017-07-04 15:02:10 +010062template <typename T>
63class ICLArray;
Anthony Barbier6ff3b192017-09-04 18:44:23 +010064class ICLTensor;
65class Window;
66
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
private:
    /** Returns the number of arguments enqueued per array object.
     *
     * @return The number of arguments enqueued per array object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_array()
    {
        // Arrays are passed to the kernel with the same layout as tensors
        return num_arguments_per_tensor<dimension_size>();
    }
    /** Returns the number of arguments enqueued per tensor object.
     *
     * @return The number of arguments enqueued per tensor object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_tensor()
    {
        // One cl_buffer + one offset_first_element, plus a stride and a step per dimension
        return 2 + 2 * dimension_size;
    }

    /** Compute a default local workgroup size for the given execution window.
     *
     * @param[in] window Execution window used to derive the global work size.
     *
     * @return Default local workgroup size for this kernel's type and global work size.
     */
    cl::NDRange default_lws_tune(const Window &window)
    {
        return get_default_lws_for_type(_type, gws_from_window(window));
    }

    using IKernel::configure; // Prevent children from calling IKernel::configure() directly
protected:
    /** Configure the kernel's window and local workgroup size hint.
     *
     * @param[in] window    The maximum window which will be returned by window()
     * @param[in] lws_hint  Local-Workgroup-Size to use.
     * @param[in] wbsm_hint (Optional) Workgroup-Batch-Size-Modifier to use.
     */
    void configure_internal(const Window &window, cl::NDRange lws_hint, cl_int wbsm_hint = 0)
    {
        configure_internal(window, CLTuningParams(lws_hint, wbsm_hint));
    }

    /** Configure the kernel's window and tuning parameters hints.
     *
     * @param[in] window             The maximum window which will be returned by window()
     * @param[in] tuning_params_hint (Optional) Tuning parameters to use.
     */
    void configure_internal(const Window &window, CLTuningParams tuning_params_hint = CLTuningParams(CLKernelLibrary::get().default_ndrange(), 0))
    {
        _tuning_params_hint = tuning_params_hint;

        // If the caller kept the library default LWS, replace it with the heuristic default for this kernel type
        if(is_same_lws(_tuning_params_hint.get_lws(), CLKernelLibrary::get().default_ndrange()))
        {
            _tuning_params_hint.set_lws(default_lws_tune(window));
        }

        IKernel::configure(window);
    }

public:
    /** Constructor */
    ICLKernel()
        : _kernel(nullptr), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0), _type(CLKernelType::UNKNOWN), _tuning_params_hint()
    {
    }
    /** Returns a reference to the OpenCL kernel of this object.
     *
     * @return A reference to the OpenCL kernel of this object.
     */
    cl::Kernel &kernel()
    {
        return _kernel;
    }
    /** Returns the CL kernel type
     *
     * @return The CL kernel type
     */
    CLKernelType type() const
    {
        return _type;
    }
    /** Add the passed 1D array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T>
    void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
    {
        add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<1>(idx, tensor, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_1D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<2>(idx, tensor, window);
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_2D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<3>(idx, tensor, window);
    }
    /** Add the passed 4D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<4>(idx, tensor, window);
    }
    /** Returns the number of arguments enqueued per 1D array object.
     *
     * @return The number of arguments enqueued per 1D array object.
     */
    constexpr static unsigned int num_arguments_per_1D_array()
    {
        return num_arguments_per_array<1>();
    }
    /** Returns the number of arguments enqueued per 1D tensor object.
     *
     * @return The number of arguments enqueued per 1D tensor object.
     */
    constexpr static unsigned int num_arguments_per_1D_tensor()
    {
        return num_arguments_per_tensor<1>();
    }
    /** Returns the number of arguments enqueued per 2D tensor object.
     *
     * @return The number of arguments enqueued per 2D tensor object.
     */
    constexpr static unsigned int num_arguments_per_2D_tensor()
    {
        return num_arguments_per_tensor<2>();
    }
    /** Returns the number of arguments enqueued per 3D tensor object.
     *
     * @return The number of arguments enqueued per 3D tensor object.
     */
    constexpr static unsigned int num_arguments_per_3D_tensor()
    {
        return num_arguments_per_tensor<3>();
    }
    /** Returns the number of arguments enqueued per 4D tensor object.
     *
     * @return The number of arguments enqueued per 4D tensor object.
     */
    constexpr static unsigned int num_arguments_per_4D_tensor()
    {
        return num_arguments_per_tensor<4>();
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @param[in]     window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue  Command queue on which to enqueue the kernel.
     */
    virtual void run(const Window &window, cl::CommandQueue &queue)
    {
        // Default no-op; kernels override either run() or run_op()
        ARM_COMPUTE_UNUSED(window, queue);
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @param[in]     tensors A vector containing the tensors to operate on.
     * @param[in]     window  Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue   Command queue on which to enqueue the kernel.
     */
    virtual void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
    {
        // Default no-op; kernels override either run() or run_op()
        ARM_COMPUTE_UNUSED(tensors, window, queue);
    }
    /** Add the passed parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx   Index at which to start adding the arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     value Value to set as an argument of the object's kernel.
     */
    template <typename T>
    void add_argument(unsigned int &idx, T value)
    {
        _kernel.setArg(idx++, value);
    }

    /** Set the Local-Workgroup-Size hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] lws_hint Local-Workgroup-Size to use
     */
    void set_lws_hint(const cl::NDRange &lws_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // lws_hint will be overwritten by configure()
        _tuning_params_hint.set_lws(lws_hint);
    }

    /** Return the Local-Workgroup-Size hint
     *
     * @return Current lws hint
     */
    cl::NDRange lws_hint() const
    {
        return _tuning_params_hint.get_lws();
    }

    /** Set the workgroup batch size modifier hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] wbsm_hint workgroup batch size modifier value
     */
    void set_wbsm_hint(const cl_int &wbsm_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // wbsm_hint will be overwritten by configure()
        _tuning_params_hint.set_wbsm(wbsm_hint);
    }

    /** Return the workgroup batch size modifier hint
     *
     * @return Current wbsm hint
     */
    cl_int wbsm_hint() const
    {
        return _tuning_params_hint.get_wbsm();
    }

    /** Get the configuration ID
     *
     * @note The configuration ID can be used by the caller to distinguish different calls of the same OpenCL kernel
     *       In particular, this method can be used by CLScheduler to keep track of the best LWS for each configuration of the same kernel.
     *       The configuration ID should be provided only for the kernels potentially affected by the LWS geometry
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @return configuration id string
     */
    const std::string &config_id() const
    {
        return _config_id;
    }

    /** Set the targeted GPU architecture
     *
     * @param[in] target The targeted GPU architecture
     */
    void set_target(GPUTarget target)
    {
        _target = target;
    }

    /** Set the targeted GPU architecture according to the CL device
     *
     * @param[in] device A CL device
     */
    void set_target(cl::Device &device);

    /** Get the targeted GPU architecture
     *
     * @return The targeted GPU architecture.
     */
    GPUTarget get_target() const
    {
        return _target;
    }

    /** Get the maximum workgroup size for the device the CLKernelLibrary uses.
     *
     * @return The maximum workgroup size value.
     */
    size_t get_max_workgroup_size();
    /** Get the global work size given an execution window
     *
     * @param[in] window Execution window
     *
     * @return Global work size of the given execution window
     */
    static cl::NDRange gws_from_window(const Window &window);

private:
    /** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T, unsigned int dimension_size>
    void add_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window);
    /** Add the passed tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    template <unsigned int dimension_size>
    void add_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);

protected:
    cl::Kernel   _kernel;             /**< OpenCL kernel to run */
    GPUTarget    _target;             /**< The targeted GPU */
    std::string  _config_id;          /**< Configuration ID */
    size_t       _max_workgroup_size; /**< The maximum workgroup size for this kernel */
    CLKernelType _type;               /**< The CL kernel type */
private:
    CLTuningParams _tuning_params_hint; /**< Tuning parameters hint for the OpenCL kernel */
};
425
/** Add the kernel to the command queue with the given window.
 *
 * @note Depending on the size of the window, this might translate into several jobs being enqueued.
 *
 * @note If kernel.kernel() is empty then the function will return without adding anything to the queue.
 *
 * @param[in,out] queue                OpenCL command queue.
 * @param[in]     kernel               Kernel to enqueue
 * @param[in]     window               Window the kernel has to process.
 * @param[in]     lws_hint             (Optional) Local workgroup size requested. Default is based on the device target.
 * @param[in]     use_dummy_work_items (Optional) Use dummy work items in order to have two dimensional power of two NDRange. Default is false
 *                                     Note: it is the kernel's responsibility to check if the work-item is out-of-range
 *
 * @note If any dimension of the lws is greater than the global workgroup size then no lws will be passed.
 */
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000441void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint = CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items = false);
SiCong Li3e363692017-07-04 15:02:10 +0100442
Alex Gildayc357c472018-03-21 13:54:09 +0000443/** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
444 *
445 * @param[in,out] idx Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
446 * @param[in] array Array to set as an argument of the object's kernel.
447 * @param[in] strides @ref Strides object containing stride of each dimension in bytes.
448 * @param[in] num_dimensions Number of dimensions of the @p array.
449 * @param[in] window Window the kernel will be executed on.
450 */
SiCong Li3e363692017-07-04 15:02:10 +0100451template <typename T, unsigned int dimension_size>
452void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
453{
Diego Lopez Recas0021d752017-12-18 14:42:56 +0000454 ARM_COMPUTE_ERROR_ON(array == nullptr);
455
SiCong Li3e363692017-07-04 15:02:10 +0100456 // Calculate offset to the start of the window
457 unsigned int offset_first_element = 0;
458
459 for(unsigned int n = 0; n < num_dimensions; ++n)
460 {
461 offset_first_element += window[n].start() * strides[n];
462 }
463
464 unsigned int idx_start = idx;
465 _kernel.setArg(idx++, array->cl_buffer());
466
467 for(unsigned int dimension = 0; dimension < dimension_size; dimension++)
468 {
469 _kernel.setArg<cl_uint>(idx++, strides[dimension]);
470 _kernel.setArg<cl_uint>(idx++, strides[dimension] * window[dimension].step());
471 }
472
473 _kernel.setArg<cl_uint>(idx++, offset_first_element);
474
Michalis Spyrou7c60c992019-10-10 14:33:47 +0100475 ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_array<dimension_size>() != idx,
476 "add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_array<dimension_size>());
SiCong Li3e363692017-07-04 15:02:10 +0100477 ARM_COMPUTE_UNUSED(idx_start);
478}
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100479}
Michalis Spyrouf4643372019-11-29 16:17:13 +0000480#endif /*ARM_COMPUTE_ICLKERNEL_H */