blob: ae3077a564405c477ada1e89d00791c4737076b9 [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Manuel Bottinibe9f9f92021-01-25 15:07:17 +00002 * Copyright (c) 2016-2021 Arm Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_ICLKERNEL_H
25#define ARM_COMPUTE_ICLKERNEL_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
steniu015f910722017-08-23 10:15:22 +010027#include "arm_compute/core/CL/CLKernelLibrary.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010028#include "arm_compute/core/CL/CLTypes.h"
29#include "arm_compute/core/CL/OpenCL.h"
Michele Di Giorgiob8fc60f2018-04-25 11:58:07 +010030#include "arm_compute/core/GPUTarget.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010031#include "arm_compute/core/IKernel.h"
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010032#include "arm_compute/core/Validate.h"
Michalis Spyrou2aad21a2020-07-02 12:43:53 +010033#include "arm_compute/core/experimental/Types.h"
Manuel Bottinibe9f9f92021-01-25 15:07:17 +000034#include "arm_compute/runtime/CL/CLTuningParams.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035
Gian Marcode691f02017-09-08 16:13:11 +010036#include <string>
37
Anthony Barbier6ff3b192017-09-04 18:44:23 +010038namespace arm_compute
39{
Giorgio Arena4a95bba2021-06-28 11:00:27 +010040namespace
41{
42bool is_same_lws(cl::NDRange lws0, cl::NDRange lws1)
43{
44 if(lws0.dimensions() != lws1.dimensions())
45 {
46 return false;
47 }
48
49 for(size_t i = 0; i < lws0.dimensions(); ++i)
50 {
51 if(lws0.get()[i] != lws1.get()[i])
52 {
53 return false;
54 }
55 }
56
57 return true;
58}
59} // namespace
SiCong Li3e363692017-07-04 15:02:10 +010060template <typename T>
61class ICLArray;
Anthony Barbier6ff3b192017-09-04 18:44:23 +010062class ICLTensor;
63class Window;
64
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
private:
    /** Returns the number of arguments enqueued per array object.
     *
     * @return The number of arguments enqueued per array object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_array()
    {
        // An array is marshalled exactly like a tensor of the same dimensionality
        return num_arguments_per_tensor<dimension_size>();
    }
    /** Returns the number of arguments enqueued per tensor object.
     *
     * @return The number of arguments enqueued per tensor object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_tensor()
    {
        // buffer + offset_first_element, plus (stride, step-stride) per dimension
        return 2 + 2 * dimension_size;
    }

    /** Fallback local workgroup size for this kernel.
     *
     * Used by configure_internal() when the caller did not provide an explicit
     * LWS hint; the window is currently unused by this default implementation.
     *
     * @param[in] window The configured kernel window (ignored here).
     *
     * @return The CLKernelLibrary default NDRange.
     */
    cl::NDRange default_lws_tune(const Window &window)
    {
        ARM_COMPUTE_UNUSED(window);
        return CLKernelLibrary::get().default_ndrange();
    }

    using IKernel::configure; // Prevent children from calling IKernel::configure() directly
protected:
    /** Configure the kernel's window and local workgroup size hint.
     *
     * @param[in] window    The maximum window which will be returned by window()
     * @param[in] lws_hint  Local-Workgroup-Size to use.
     * @param[in] wbsm_hint (Optional) Workgroup-Batch-Size-Modifier to use.
     */
    void configure_internal(const Window &window, cl::NDRange lws_hint, cl_int wbsm_hint = 0)
    {
        configure_internal(window, CLTuningParams(lws_hint, wbsm_hint));
    }

    /** Configure the kernel's window and tuning parameters hints.
     *
     * @param[in] window             The maximum window which will be returned by window()
     * @param[in] tuning_params_hint (Optional) Tuning parameters to use.
     */
    void configure_internal(const Window &window, CLTuningParams tuning_params_hint = CLTuningParams(CLKernelLibrary::get().default_ndrange(), 0))
    {
        _tuning_params_hint = tuning_params_hint;

        // If the caller left the LWS at the library default, substitute this kernel's default tuning
        if(is_same_lws(_tuning_params_hint.get_lws(), CLKernelLibrary::get().default_ndrange()))
        {
            _tuning_params_hint.set_lws(default_lws_tune(window));
        }

        IKernel::configure(window);
    }

public:
    /** Constructor */
    ICLKernel()
        : _kernel(nullptr), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0), _type(CLKernelType::UNKNOWN), _tuning_params_hint()
    {
    }
    /** Returns a reference to the OpenCL kernel of this object.
     *
     * @return A reference to the OpenCL kernel of this object.
     */
    cl::Kernel &kernel()
    {
        return _kernel;
    }
    /** Returns the CL kernel type
     *
     * @return The CL kernel type
     */
    CLKernelType type() const
    {
        return _type;
    }
    /** Add the passed 1D array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T>
    void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
    {
        add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<1>(idx, tensor, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_1D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<2>(idx, tensor, window);
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_2D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<3>(idx, tensor, window);
    }
    /** Add the passed 4D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<4>(idx, tensor, window);
    }
    /** Returns the number of arguments enqueued per 1D array object.
     *
     * @return The number of arguments enqueued per 1D array object.
     */
    constexpr static unsigned int num_arguments_per_1D_array()
    {
        return num_arguments_per_array<1>();
    }
    /** Returns the number of arguments enqueued per 1D tensor object.
     *
     * @return The number of arguments enqueued per 1D tensor object.
     */
    constexpr static unsigned int num_arguments_per_1D_tensor()
    {
        return num_arguments_per_tensor<1>();
    }
    /** Returns the number of arguments enqueued per 2D tensor object.
     *
     * @return The number of arguments enqueued per 2D tensor object.
     */
    constexpr static unsigned int num_arguments_per_2D_tensor()
    {
        return num_arguments_per_tensor<2>();
    }
    /** Returns the number of arguments enqueued per 3D tensor object.
     *
     * @return The number of arguments enqueued per 3D tensor object.
     */
    constexpr static unsigned int num_arguments_per_3D_tensor()
    {
        return num_arguments_per_tensor<3>();
    }
    /** Returns the number of arguments enqueued per 4D tensor object.
     *
     * @return The number of arguments enqueued per 4D tensor object.
     */
    constexpr static unsigned int num_arguments_per_4D_tensor()
    {
        return num_arguments_per_tensor<4>();
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @param[in]     window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue  Command queue on which to enqueue the kernel.
     */
    virtual void run(const Window &window, cl::CommandQueue &queue)
    {
        // Default implementation is a no-op; kernels override either run() or run_op()
        ARM_COMPUTE_UNUSED(window, queue);
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @param[in]     tensors A vector containing the tensors to operate on.
     * @param[in]     window  Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue   Command queue on which to enqueue the kernel.
     */
    virtual void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
    {
        // Default implementation is a no-op; kernels override either run() or run_op()
        ARM_COMPUTE_UNUSED(tensors, window, queue);
    }
    /** Add the passed parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx   Index at which to start adding the arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     value Value to set as an argument of the object's kernel.
     */
    template <typename T>
    void add_argument(unsigned int &idx, T value)
    {
        _kernel.setArg(idx++, value);
    }

    /** Set the Local-Workgroup-Size hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] lws_hint Local-Workgroup-Size to use
     */
    void set_lws_hint(const cl::NDRange &lws_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // lws_hint will be overwritten by configure()
        _tuning_params_hint.set_lws(lws_hint);
    }

    /** Return the Local-Workgroup-Size hint
     *
     * @return Current lws hint
     */
    cl::NDRange lws_hint() const
    {
        return _tuning_params_hint.get_lws();
    }

    /** Set the workgroup batch size modifier hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] wbsm_hint workgroup batch size modifier value
     */
    void set_wbsm_hint(const cl_int &wbsm_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // wbsm_hint will be overwritten by configure()
        _tuning_params_hint.set_wbsm(wbsm_hint);
    }

    /** Return the workgroup batch size modifier hint
     *
     * @return Current wbsm hint
     */
    cl_int wbsm_hint() const
    {
        return _tuning_params_hint.get_wbsm();
    }

    /** Get the configuration ID
     *
     * @note The configuration ID can be used by the caller to distinguish different calls of the same OpenCL kernel
     *       In particular, this method can be used by CLScheduler to keep track of the best LWS for each configuration of the same kernel.
     *       The configuration ID should be provided only for the kernels potentially affected by the LWS geometry
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @return configuration id string
     */
    const std::string &config_id() const
    {
        return _config_id;
    }

    /** Set the targeted GPU architecture
     *
     * @param[in] target The targeted GPU architecture
     */
    void set_target(GPUTarget target)
    {
        _target = target;
    }

    /** Set the targeted GPU architecture according to the CL device
     *
     * @param[in] device A CL device
     */
    void set_target(cl::Device &device);

    /** Get the targeted GPU architecture
     *
     * @return The targeted GPU architecture.
     */
    GPUTarget get_target() const
    {
        return _target;
    }

    /** Get the maximum workgroup size for the device the CLKernelLibrary uses.
     *
     * @return The maximum workgroup size value.
     */
    size_t get_max_workgroup_size();
    /** Get the global work size given an execution window
     *
     * @param[in] window Execution window
     *
     * @return Global work size of the given execution window
     */
    static cl::NDRange gws_from_window(const Window &window);

private:
    /** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T, unsigned int dimension_size>
    void add_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window);
    /** Add the passed tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    template <unsigned int dimension_size>
    void add_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);

protected:
    cl::Kernel  _kernel;             /**< OpenCL kernel to run */
    GPUTarget   _target;             /**< The targeted GPU */
    std::string _config_id;          /**< Configuration ID */
    size_t      _max_workgroup_size; /**< The maximum workgroup size for this kernel */
    CLKernelType _type;              /**< The CL kernel type */
private:
    CLTuningParams _tuning_params_hint; /**< Tuning parameters hint for the OpenCL kernel */
};
424
425/** Add the kernel to the command queue with the given window.
426 *
427 * @note Depending on the size of the window, this might translate into several jobs being enqueued.
428 *
429 * @note If kernel->kernel() is empty then the function will return without adding anything to the queue.
430 *
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000431 * @param[in,out] queue OpenCL command queue.
432 * @param[in] kernel Kernel to enqueue
433 * @param[in] window Window the kernel has to process.
434 * @param[in] lws_hint (Optional) Local workgroup size requested. Default is based on the device target.
435 * @param[in] use_dummy_work_items (Optional) Use dummy work items in order to have two dimensional power of two NDRange. Default is false
436 * Note: it is kernel responsibility to check if the work-item is out-of-range
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100437 *
438 * @note If any dimension of the lws is greater than the global workgroup size then no lws will be passed.
439 */
Gian Marco Iodiceb0c50372019-03-15 10:13:05 +0000440void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint = CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items = false);
SiCong Li3e363692017-07-04 15:02:10 +0100441
Alex Gildayc357c472018-03-21 13:54:09 +0000442/** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
443 *
444 * @param[in,out] idx Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
445 * @param[in] array Array to set as an argument of the object's kernel.
446 * @param[in] strides @ref Strides object containing stride of each dimension in bytes.
447 * @param[in] num_dimensions Number of dimensions of the @p array.
448 * @param[in] window Window the kernel will be executed on.
449 */
SiCong Li3e363692017-07-04 15:02:10 +0100450template <typename T, unsigned int dimension_size>
451void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
452{
Diego Lopez Recas0021d752017-12-18 14:42:56 +0000453 ARM_COMPUTE_ERROR_ON(array == nullptr);
454
SiCong Li3e363692017-07-04 15:02:10 +0100455 // Calculate offset to the start of the window
456 unsigned int offset_first_element = 0;
457
458 for(unsigned int n = 0; n < num_dimensions; ++n)
459 {
460 offset_first_element += window[n].start() * strides[n];
461 }
462
463 unsigned int idx_start = idx;
464 _kernel.setArg(idx++, array->cl_buffer());
465
466 for(unsigned int dimension = 0; dimension < dimension_size; dimension++)
467 {
468 _kernel.setArg<cl_uint>(idx++, strides[dimension]);
469 _kernel.setArg<cl_uint>(idx++, strides[dimension] * window[dimension].step());
470 }
471
472 _kernel.setArg<cl_uint>(idx++, offset_first_element);
473
Michalis Spyrou7c60c992019-10-10 14:33:47 +0100474 ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_array<dimension_size>() != idx,
475 "add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_array<dimension_size>());
SiCong Li3e363692017-07-04 15:02:10 +0100476 ARM_COMPUTE_UNUSED(idx_start);
477}
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100478}
Michalis Spyrouf4643372019-11-29 16:17:13 +0000479#endif /*ARM_COMPUTE_ICLKERNEL_H */