/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H

#include "arm_compute/runtime/CL/ICLOperator.h"
#include "arm_compute/runtime/IFunction.h"

namespace arm_compute
{
class ICLTensor;
class CLCompileContext;
class ITensorInfo;

namespace experimental
{
/** Basic function to run @ref arm_compute::opencl::kernels::ClSaturatedArithmeticKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref arm_compute::opencl::kernels::ClSaturatedArithmeticKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

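// Usage sketch for the experimental operator interface above (non-normative, not part
// of the API). It assumes a valid CLCompileContext `ctx` and already-allocated CLTensors
// `src0`, `src1` and `dst` created elsewhere. Experimental operators are configured on
// ITensorInfo objects and executed on an ITensorPack that binds the concrete tensors:
//
//   experimental::CLArithmeticSubtraction sub;
//   sub.configure(ctx, src0.info(), src1.info(), dst.info(), ConvertPolicy::SATURATE);
//
//   ITensorPack pack;
//   pack.add_tensor(TensorType::ACL_SRC_0, &src0);
//   pack.add_tensor(TensorType::ACL_SRC_1, &src1);
//   pack.add_tensor(TensorType::ACL_DST, &dst);
//   sub.run(pack);
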
/** Basic function to run @ref arm_compute::opencl::kernels::ClSaturatedArithmeticKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 */
class CLArithmeticDivision : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref arm_compute::opencl::kernels::ClArithmeticKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref arm_compute::opencl::kernels::ClArithmeticKernel for max
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref arm_compute::opencl::kernels::ClArithmeticKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref arm_compute::opencl::kernels::ClArithmeticKernel for min
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref arm_compute::opencl::kernels::ClArithmeticKernel for squared difference
 *
 * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2).
 */
class CLElementwiseSquaredDiff : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref arm_compute::opencl::kernels::ClArithmeticKernel for squared difference
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref arm_compute::opencl::kernels::ClArithmeticKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i]).
 */
class CLElementwisePower : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref arm_compute::opencl::kernels::ClArithmeticKernel for power
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: F16/F32.
     * @param[in] output Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
} // namespace experimental

/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Default Destructor */
    ~CLArithmeticAddition();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition(const CLArithmeticAddition &) = delete;
    /** Default move constructor */
    CLArithmeticAddition(CLArithmeticAddition &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
    /** Default move assignment operator */
    CLArithmeticAddition &operator=(CLArithmeticAddition &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

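// Minimal usage sketch for the run-time function above (non-normative). It assumes the
// CL backend has already been initialised elsewhere (e.g. CLScheduler::get().default_init())
// and that the shape and data type chosen here are purely illustrative:
//
//   CLTensor a, b, dst;
//   a.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));
//   b.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));
//
//   CLArithmeticAddition add;
//   add.configure(&a, &b, &dst, ConvertPolicy::SATURATE);
//
//   a.allocator()->allocate();
//   b.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... fill a and b ...
//   add.run();
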
/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Default Destructor */
    ~CLArithmeticSubtraction();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
    /** Default move constructor */
    CLArithmeticSubtraction(CLArithmeticSubtraction &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
    /** Default move assignment operator */
    CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

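// The static validate() entry points above can be used to query support before any OpenCL
// resources are allocated. A sketch (non-normative; the shapes and data types are only
// illustrative):
//
//   const TensorInfo in1(TensorShape(16U, 4U), 1, DataType::QASYMM8);
//   const TensorInfo in2(TensorShape(16U, 4U), 1, DataType::QASYMM8);
//   const TensorInfo out(TensorShape(16U, 4U), 1, DataType::QASYMM8);
//   const Status     s   = CLArithmeticSubtraction::validate(&in1, &in2, &out, ConvertPolicy::SATURATE);
//   const bool supported = (s.error_code() == ErrorCode::OK); // s.error_description() explains a failure
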
/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 */
class CLArithmeticDivision : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Default Destructor */
    ~CLArithmeticDivision();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision(const CLArithmeticDivision &) = delete;
    /** Default move constructor */
    CLArithmeticDivision(CLArithmeticDivision &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
    /** Default move assignment operator */
    CLArithmeticDivision &operator=(CLArithmeticDivision &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

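// Broadcasting sketch (non-normative): the parameter documentation above notes that an
// input's TensorInfo may be modified when dimension 0 is broadcast. For example, dividing
// a (W, H) tensor by a per-row tensor of shape (1, H) is a valid configuration; the tensor
// names and shapes below are illustrative only:
//
//   CLTensor num, den, dst;
//   num.allocator()->init(TensorInfo(TensorShape(64U, 8U), 1, DataType::F32));
//   den.allocator()->init(TensorInfo(TensorShape(1U, 8U), 1, DataType::F32)); // broadcast along dimension 0
//   dst.allocator()->init(TensorInfo(TensorShape(64U, 8U), 1, DataType::F32));
//
//   CLArithmeticDivision div;
//   div.configure(&num, &den, &dst);
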
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Default Destructor */
    ~CLElementwiseMax();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax(const CLElementwiseMax &) = delete;
    /** Default move constructor */
    CLElementwiseMax(CLElementwiseMax &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
    /** Default move assignment operator */
    CLElementwiseMax &operator=(CLElementwiseMax &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for max
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

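// Fused activation sketch (non-normative): every configure() overload in this file takes an
// optional ActivationLayerInfo, which lets an activation be fused into the elementwise kernel
// instead of being run as a separate layer. Assuming CLTensors `a`, `b` and `dst` prepared as
// in the earlier sketches, a bounded ReLU fused into an elementwise max could look like:
//
//   CLElementwiseMax max_op;
//   max_op.configure(&a, &b, &dst,
//                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
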
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Default Destructor */
    ~CLElementwiseMin();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin(const CLElementwiseMin &) = delete;
    /** Default move constructor */
    CLElementwiseMin(CLElementwiseMin &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
    /** Default move assignment operator */
    CLElementwiseMin &operator=(CLElementwiseMin &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for min
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for squared difference
 *
 * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2).
 */
class CLElementwiseSquaredDiff : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Default Destructor */
    ~CLElementwiseSquaredDiff();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
    /** Default move constructor */
    CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
    /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for squared difference
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i]).
 */
class CLElementwisePower : public IFunction
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Default Destructor */
    ~CLElementwisePower();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower(const CLElementwisePower &) = delete;
    /** Default move constructor */
    CLElementwisePower(CLElementwisePower &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower &operator=(const CLElementwisePower &) = delete;
    /** Default move assignment operator */
    CLElementwisePower &operator=(CLElementwisePower &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
     * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for power
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: F16/F32.
     * @param[in] output Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */