blob: 55c5fb345515b6b2fd00e67e3320907a1a1087fd [file] [log] [blame]
giuros01164a2722018-11-20 18:34:46 +00001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2018-2020 Arm Limited.
giuros01164a2722018-11-20 18:34:46 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
George Wort5a97b282018-12-21 16:21:04 +000020 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
giuros01164a2722018-11-20 18:34:46 +000021 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
25#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
giuros01164a2722018-11-20 18:34:46 +000026
Michalis Spyrouad7515d2020-07-24 00:02:23 +010027#include "arm_compute/runtime/CL/ICLOperator.h"
28#include "arm_compute/runtime/IFunction.h"
giuros01164a2722018-11-20 18:34:46 +000029
30namespace arm_compute
31{
32class ICLTensor;
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010033class CLCompileContext;
34class ITensorInfo;
giuros01164a2722018-11-20 18:34:46 +000035
Michalis Spyrouad7515d2020-07-24 00:02:23 +010036namespace experimental
37{
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow (wrap or saturate).
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow (wrap or saturate).
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
104
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow (wrap or saturate).
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow (wrap or saturate).
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
171
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors (i.e., out[i] = in1[i] / in2[i]).
 */
class CLArithmeticDivision : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Initialise the kernel's inputs, output.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
207
/** Basic function to run @ref CLArithmeticOperationKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs an elementwise max operation between two tensors (i.e., out[i] = max(in1[i], in2[i])).
 */
class CLElementwiseMax : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
243
/** Basic function to run @ref CLArithmeticOperationKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs an elementwise min operation between two tensors (i.e., out[i] = min(in1[i], in2[i])).
 */
class CLElementwiseMin : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
279
/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2).
 */
class CLElementwiseSquaredDiff : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
315
/** Basic function to run @ref CLArithmeticOperationKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i]).
 */
class CLElementwisePower : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: F16/F32.
     * @param[in] output   Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
351} // namespace experimental
352
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Default Destructor */
    ~CLArithmeticAddition();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition(const CLArithmeticAddition &) = delete;
    /** Default move constructor */
    CLArithmeticAddition(CLArithmeticAddition &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
    /** Default move assignment operator */
    CLArithmeticAddition &operator=(CLArithmeticAddition &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow (wrap or saturate).
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow (wrap or saturate).
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow (wrap or saturate).
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // PImpl idiom: the implementation type is only defined in the .cpp,
    // which is why copy is deleted and destructor/moves are out-of-line.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
458
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Default Destructor */
    ~CLArithmeticSubtraction();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
    /** Default move constructor */
    CLArithmeticSubtraction(CLArithmeticSubtraction &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
    /** Default move assignment operator */
    CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow (wrap or saturate).
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow (wrap or saturate).
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow (wrap or saturate).
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // PImpl idiom: the implementation type is only defined in the .cpp,
    // which is why copy is deleted and destructor/moves are out-of-line.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
564
/** Basic function to run @ref CLArithmeticOperationKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 */
class CLArithmeticDivision : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Default Destructor */
    ~CLArithmeticDivision();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision(const CLArithmeticDivision &) = delete;
    /** Default move constructor */
    CLArithmeticDivision(CLArithmeticDivision &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
    /** Default move assignment operator */
    CLArithmeticDivision &operator=(CLArithmeticDivision &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @note NOTE(review): this overload takes @p input1 / @p input2 as const while the overload
     *       above takes them as non-const, even though both are documented [in, out] — confirm
     *       which qualification matches the definition.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl; // Opaque implementation (pimpl idiom): keeps kernel details out of this public header
    std::unique_ptr<Impl> _impl;
};
624
/** Basic function to run @ref CLArithmeticOperationKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Default Destructor */
    ~CLElementwiseMax();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax(const CLElementwiseMax &) = delete;
    /** Default move constructor */
    CLElementwiseMax(CLElementwiseMax &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
    /** Default move assignment operator */
    CLElementwiseMax &operator=(CLElementwiseMax &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl; // Opaque implementation (pimpl idiom): keeps kernel details out of this public header
    std::unique_ptr<Impl> _impl;
};
684
/** Basic function to run @ref CLArithmeticOperationKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Default Destructor */
    ~CLElementwiseMin();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin(const CLElementwiseMin &) = delete;
    /** Default move constructor */
    CLElementwiseMin(CLElementwiseMin &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
    /** Default move assignment operator */
    CLElementwiseMin &operator=(CLElementwiseMin &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl; // Opaque implementation (pimpl idiom): keeps kernel details out of this public header
    std::unique_ptr<Impl> _impl;
};
744
/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
 */
class CLElementwiseSquaredDiff : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Default Destructor */
    ~CLElementwiseSquaredDiff();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
    /** Default move constructor */
    CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
    /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl; // Opaque implementation (pimpl idiom): keeps kernel details out of this public header
    std::unique_ptr<Impl> _impl;
};
Usama Arif52c54f62019-05-14 10:22:36 +0100804
/** Basic function to run @ref CLArithmeticOperationKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
 */
class CLElementwisePower : public IFunction
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Default Destructor */
    ~CLElementwisePower();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower(const CLElementwisePower &) = delete;
    /** Default move constructor */
    CLElementwisePower(CLElementwisePower &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower &operator=(const CLElementwisePower &) = delete;
    /** Default move assignment operator */
    CLElementwisePower &operator=(CLElementwisePower &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: F16/F32.
     * @param[in] output   Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl; // Opaque implementation (pimpl idiom): keeps kernel details out of this public header
    std::unique_ptr<Impl> _impl;
};
giuros01164a2722018-11-20 18:34:46 +0000864} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000865#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */