/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H

#include "arm_compute/runtime/CL/ICLOperator.h"
#include "arm_compute/runtime/IFunction.h"

30namespace arm_compute
31{
32class ICLTensor;
33
Michalis Spyrouad7515d2020-07-24 00:02:23 +010034namespace experimental
35{
giuros01164a2722018-11-20 18:34:46 +000036/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
37 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010038 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
giuros01164a2722018-11-20 18:34:46 +000039 * @note The function performs an arithmetic addition between two tensors.
40 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +010041class CLArithmeticAddition : public ICLOperator
giuros01164a2722018-11-20 18:34:46 +000042{
43public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +010044 /** Default Constructor */
45 CLArithmeticAddition();
Manuel Bottini2b84be52020-04-08 10:15:51 +010046 /** Initialise the kernel's inputs, output and conversion policy.
47 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010048 * Valid configurations (Input1,Input2) -> Output :
49 *
50 * - (U8,U8) -> U8
51 * - (U8,U8) -> S16
52 * - (S16,U8) -> S16
53 * - (U8,S16) -> S16
54 * - (S16,S16) -> S16
55 * - (S32,S32) -> S32
56 * - (F16,F16) -> F16
57 * - (F32,F32) -> F32
58 * - (QASYMM8,QASYMM8) -> QASYMM8
59 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
60 * - (QSYMM16,QSYMM16) -> QSYMM16
61 *
Manuel Bottini2b84be52020-04-08 10:15:51 +010062 * @param[in] compile_context The compile context to be used.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010063 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +010064 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010065 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +010066 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010067 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +010068 * @param[in] policy Policy to use to handle overflow.
69 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
70 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +010071 void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
72 const ActivationLayerInfo &act_info = ActivationLayerInfo());
giuros01164a2722018-11-20 18:34:46 +000073 /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
74 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010075 * Valid configurations (Input1,Input2) -> Output :
76 *
77 * - (U8,U8) -> U8
78 * - (U8,U8) -> S16
79 * - (S16,U8) -> S16
80 * - (U8,S16) -> S16
81 * - (S16,S16) -> S16
82 * - (S32,S32) -> S32
83 * - (F16,F16) -> F16
84 * - (F32,F32) -> F32
85 * - (QASYMM8,QASYMM8) -> QASYMM8
86 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
87 * - (QSYMM16,QSYMM16) -> QSYMM16
88 *
89 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
90 * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
91 * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000092 * @param[in] policy Policy to use to handle overflow.
93 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +000094 *
95 * @return a status
96 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000097 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michalis Spyrouad7515d2020-07-24 00:02:23 +010098
99 // Inherited methods overridden:
Georgios Pinitas0499dff2020-07-31 22:21:38 +0100100 void run(ITensorPack &tensors) override;
giuros01164a2722018-11-20 18:34:46 +0000101};
102
103/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
104 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100105 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32.
giuros01164a2722018-11-20 18:34:46 +0000106 * @note The function performs an arithmetic subtraction between two tensors.
107 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100108class CLArithmeticSubtraction : public ICLOperator
giuros01164a2722018-11-20 18:34:46 +0000109{
110public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100111 /** Default Constructor */
112 CLArithmeticSubtraction();
113 /** Initialise the kernel's inputs, output and conversion policy.
114 *
115 * Valid configurations (Input1,Input2) -> Output :
116 *
117 * - (U8,U8) -> U8
118 * - (U8,U8) -> S16
119 * - (S16,U8) -> S16
120 * - (U8,S16) -> S16
121 * - (S16,S16) -> S16
122 * - (S32,S32) -> S32
123 * - (F16,F16) -> F16
124 * - (F32,F32) -> F32
125 * - (QASYMM8,QASYMM8) -> QASYMM8
126 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
127 * - (QSYMM16,QSYMM16) -> QSYMM16
128 *
129 * @param[in] compile_context The compile context to be used.
130 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
131 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
132 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
133 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
134 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
135 * @param[in] policy Policy to use to handle overflow.
136 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
137 */
138 void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
139 const ActivationLayerInfo &act_info = ActivationLayerInfo());
140 /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
141 *
142 * Valid configurations (Input1,Input2) -> Output :
143 *
144 * - (U8,U8) -> U8
145 * - (U8,U8) -> S16
146 * - (S16,U8) -> S16
147 * - (U8,S16) -> S16
148 * - (S16,S16) -> S16
149 * - (S32,S32) -> S32
150 * - (F16,F16) -> F16
151 * - (F32,F32) -> F32
152 * - (QASYMM8,QASYMM8) -> QASYMM8
153 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
154 * - (QSYMM16,QSYMM16) -> QSYMM16
155 *
156 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
157 * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
158 * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
159 * @param[in] policy Policy to use to handle overflow.
160 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
161 *
162 * @return a status
163 */
164 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
165
166 // Inherited methods overridden:
Georgios Pinitas0499dff2020-07-31 22:21:38 +0100167 void run(ITensorPack &tensors) override;
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100168};
169
170/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for division
171 *
172 * @note The tensor data type for the inputs must be F16/F32.
173 * @note The function performs an arithmetic division between two tensors.
174 */
175class CLArithmeticDivision : public ICLOperator
176{
177public:
178 /** Default Constructor */
179 CLArithmeticDivision();
180 /** Initialise the kernel's inputs, output.
181 *
182 * @param[in] compile_context The compile context to be used.
183 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
184 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
185 * @param[in, out] input2 Second tensor input. Same as @p input1.
186 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
187 * @param[out] output Output tensor. Data types supported: Same as @p input1.
188 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
189 */
190 void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
191 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
192 *
193 * @param[in] input1 First tensor input info. Data types supported: F16/F32.
194 * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
195 * @param[in] output Output tensor info. Data types supported: Same as @p input1.
196 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
197 *
198 * @return a status
199 */
200 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
201
202 // Inherited methods overridden:
Georgios Pinitas0499dff2020-07-31 22:21:38 +0100203 void run(ITensorPack &tensors) override;
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100204};
205
206/** Basic function to run @ref CLArithmeticOperationKernel for max
207 *
208 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
209 * @note The function performs a max operation between two tensors.
210 */
211class CLElementwiseMax : public ICLOperator
212{
213public:
214 /** Default Constructor */
215 CLElementwiseMax();
216 /** Initialise the kernel's inputs, output and conversion policy.
217 *
218 * @param[in] compile_context The compile context to be used.
219 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
220 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
221 * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
222 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
223 * @param[out] output Output tensor. Data types supported: same as @p input1.
224 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
225 */
226 void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
227 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
228 *
229 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
230 * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
231 * @param[in] output Output tensor info. Data types supported: same as @p input1.
232 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
233 *
234 * @return a status
235 */
236 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
237
238 // Inherited methods overridden:
Georgios Pinitas0499dff2020-07-31 22:21:38 +0100239 void run(ITensorPack &tensors) override;
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100240};
241
242/** Basic function to run @ref CLArithmeticOperationKernel for min
243 *
244 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
245 * @note The function performs a max operation between two tensors.
246 */
247class CLElementwiseMin : public ICLOperator
248{
249public:
250 /** Default Constructor */
251 CLElementwiseMin();
252 /** Initialise the kernel's inputs, output and conversion policy.
253 *
254 * @param[in] compile_context The compile context to be used.
255 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
256 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
257 * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
258 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
259 * @param[out] output Output tensor. Data types supported: same as @p input1.
260 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
261 */
262 void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
263 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
264 *
265 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
266 * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
267 * @param[in] output Output tensor info. Data types supported: same as @p input1.
268 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
269 *
270 * @return a status
271 */
272 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
273
274 // Inherited methods overridden:
Georgios Pinitas0499dff2020-07-31 22:21:38 +0100275 void run(ITensorPack &tensors) override;
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100276};
277
278/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
279 *
280 * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32.
281 * @note The function performs a squared different operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2
282 */
283class CLElementwiseSquaredDiff : public ICLOperator
284{
285public:
286 /** Default Constructor */
287 CLElementwiseSquaredDiff();
288 /** Initialise the kernel's inputs, output and conversion policy.
289 *
290 * @param[in] compile_context The compile context to be used.
291 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
292 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
293 * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
294 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
295 * @param[out] output Output tensor. Data types supported: same as @p input1.
296 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
297 */
298 void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
299 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
300 *
301 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
302 * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
303 * @param[in] output Output tensor info. Data types supported: same as @p input1.
304 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
305 *
306 * @return a status
307 */
308 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
309
310 // Inherited methods overridden:
Georgios Pinitas0499dff2020-07-31 22:21:38 +0100311 void run(ITensorPack &tensors) override;
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100312};
313
314/** Basic function to run @ref CLArithmeticOperationKernel for power
315 *
316 * @note The tensor data type for the inputs must be F16/F32.
317 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
318 */
319class CLElementwisePower : public ICLOperator
320{
321public:
322 /** Default Constructor */
323 CLElementwisePower();
324 /** Initialise the kernel's inputs, output and conversion policy.
325 *
326 * @param[in] compile_context The compile context to be used.
327 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
328 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
329 * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
330 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
331 * @param[out] output Output tensor. Data types supported:F16/F32.
332 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
333 */
334 void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
335 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
336 *
337 * @param[in] input1 First tensor input info. Data types supported: F16/F32.
338 * @param[in] input2 Second tensor input info. Data types supported: F16/F32.
339 * @param[in] output Output tensor info. Data types supported: F16/F32.
340 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
341 *
342 * @return a status
343 */
344 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
345
346 // Inherited methods overridden:
Georgios Pinitas0499dff2020-07-31 22:21:38 +0100347 void run(ITensorPack &tensors) override;
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100348};
349} // namespace experimental
350
351/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
352 *
353 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
354 * @note The function performs an arithmetic addition between two tensors.
355 */
356class CLArithmeticAddition : public IFunction
357{
358public:
359 /** Default Constructor */
360 CLArithmeticAddition();
361 /** Default Destructor */
362 ~CLArithmeticAddition();
363 /** Prevent instances of this class from being copied (As this class contains pointers) */
364 CLArithmeticAddition(const CLArithmeticAddition &) = delete;
365 /** Default move constructor */
366 CLArithmeticAddition(CLArithmeticAddition &&);
367 /** Prevent instances of this class from being copied (As this class contains pointers) */
368 CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
369 /** Default move assignment operator */
370 CLArithmeticAddition &operator=(CLArithmeticAddition &&);
giuros01164a2722018-11-20 18:34:46 +0000371 /** Initialise the kernel's inputs, output and conversion policy.
372 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100373 * Valid configurations (Input1,Input2) -> Output :
374 *
375 * - (U8,U8) -> U8
376 * - (U8,U8) -> S16
377 * - (S16,U8) -> S16
378 * - (U8,S16) -> S16
379 * - (S16,S16) -> S16
380 * - (S32,S32) -> S32
381 * - (F16,F16) -> F16
382 * - (F32,F32) -> F32
383 * - (QASYMM8,QASYMM8) -> QASYMM8
384 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
385 * - (QSYMM16,QSYMM16) -> QSYMM16
386 *
387 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000388 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100389 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000390 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100391 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000392 * @param[in] policy Policy to use to handle overflow.
393 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000394 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000395 void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Manuel Bottini2b84be52020-04-08 10:15:51 +0100396 /** Initialise the kernel's inputs, output and conversion policy.
397 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100398 * Valid configurations (Input1,Input2) -> Output :
399 *
400 * - (U8,U8) -> U8
401 * - (U8,U8) -> S16
402 * - (S16,U8) -> S16
403 * - (U8,S16) -> S16
404 * - (S16,S16) -> S16
405 * - (S32,S32) -> S32
406 * - (F16,F16) -> F16
407 * - (F32,F32) -> F32
408 * - (QASYMM8,QASYMM8) -> QASYMM8
409 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
410 * - (QSYMM16,QSYMM16) -> QSYMM16
411 *
Manuel Bottini2b84be52020-04-08 10:15:51 +0100412 * @param[in] compile_context The compile context to be used.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100413 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100414 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100415 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100416 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100417 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100418 * @param[in] policy Policy to use to handle overflow.
419 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
420 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100421 void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
422 const ActivationLayerInfo &act_info = ActivationLayerInfo());
423 /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
424 *
425 * Valid configurations (Input1,Input2) -> Output :
426 *
427 * - (U8,U8) -> U8
428 * - (U8,U8) -> S16
429 * - (S16,U8) -> S16
430 * - (U8,S16) -> S16
431 * - (S16,S16) -> S16
432 * - (S32,S32) -> S32
433 * - (F16,F16) -> F16
434 * - (F32,F32) -> F32
435 * - (QASYMM8,QASYMM8) -> QASYMM8
436 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
437 * - (QSYMM16,QSYMM16) -> QSYMM16
438 *
439 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
440 * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
441 * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
442 * @param[in] policy Policy to use to handle overflow.
443 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
444 *
445 * @return a status
446 */
447 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
448
449 // Inherited methods overridden:
450 void run() override;
451
452private:
453 struct Impl;
454 std::unique_ptr<Impl> _impl;
455};
456
457/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
458 *
459 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32.
460 * @note The function performs an arithmetic subtraction between two tensors.
461 */
462class CLArithmeticSubtraction : public IFunction
463{
464public:
465 /** Default Constructor */
466 CLArithmeticSubtraction();
467 /** Default Destructor */
468 ~CLArithmeticSubtraction();
469 /** Prevent instances of this class from being copied (As this class contains pointers) */
470 CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
471 /** Default move constructor */
472 CLArithmeticSubtraction(CLArithmeticSubtraction &&);
473 /** Prevent instances of this class from being copied (As this class contains pointers) */
474 CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
475 /** Default move assignment operator */
476 CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
477 /** Initialise the kernel's inputs, output and conversion policy.
478 *
479 * Valid configurations (Input1,Input2) -> Output :
480 *
481 * - (U8,U8) -> U8
482 * - (U8,U8) -> S16
483 * - (S16,U8) -> S16
484 * - (U8,S16) -> S16
485 * - (S16,S16) -> S16
486 * - (S32,S32) -> S32
487 * - (F16,F16) -> F16
488 * - (F32,F32) -> F32
489 * - (QASYMM8,QASYMM8) -> QASYMM8
490 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
491 * - (QSYMM16,QSYMM16) -> QSYMM16
492 *
493 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
494 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
495 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
496 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
497 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
498 * @param[in] policy Policy to use to handle overflow.
499 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
500 */
501 void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow (wrap or saturate).
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow (wrap or saturate).
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // PImpl: keeps kernel/operator details out of this public header
    std::unique_ptr<Impl> _impl; // owning pointer to the implementation (why copy ctor/assign are deleted)
};
562
/** Basic function to run @ref CLArithmeticOperationKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 * @note Division takes no ConvertPolicy, so it maps to the non-saturated arithmetic kernel
 *       (unlike addition/subtraction, which use the saturated variant).
 */
class CLArithmeticDivision : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Default Destructor */
    ~CLArithmeticDivision();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision(const CLArithmeticDivision &) = delete;
    /** Default move constructor */
    CLArithmeticDivision(CLArithmeticDivision &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
    /** Default move assignment operator */
    CLArithmeticDivision &operator=(CLArithmeticDivision &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1   First tensor input (dividend). Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input (divisor). Same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * NOTE(review): this overload takes const ICLTensor* inputs while the overload above and the
     * sibling element-wise functions take non-const ICLTensor* — confirm the const qualification
     * is intentional given the [in, out] broadcasting note below.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input (dividend). Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input (divisor). Same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // PImpl: keeps kernel/operator details out of this public header
    std::unique_ptr<Impl> _impl; // owning pointer to the implementation (why copy ctor/assign are deleted)
};
622
/** Basic function to run @ref CLArithmeticOperationKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Default Destructor */
    ~CLElementwiseMax();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax(const CLElementwiseMax &) = delete;
    /** Default move constructor */
    CLElementwiseMax(CLElementwiseMax &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
    /** Default move assignment operator */
    CLElementwiseMax &operator=(CLElementwiseMax &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // PImpl: keeps kernel/operator details out of this public header
    std::unique_ptr<Impl> _impl; // owning pointer to the implementation (why copy ctor/assign are deleted)
};
682
/** Basic function to run @ref CLArithmeticOperationKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Default Destructor */
    ~CLElementwiseMin();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin(const CLElementwiseMin &) = delete;
    /** Default move constructor */
    CLElementwiseMin(CLElementwiseMin &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
    /** Default move assignment operator */
    CLElementwiseMin &operator=(CLElementwiseMin &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // PImpl: keeps kernel/operator details out of this public header
    std::unique_ptr<Impl> _impl; // owning pointer to the implementation (why copy ctor/assign are deleted)
};
742
/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
 */
class CLElementwiseSquaredDiff : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Default Destructor */
    ~CLElementwiseSquaredDiff();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
    /** Default move constructor */
    CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
    /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // PImpl: keeps kernel/operator details out of this public header
    std::unique_ptr<Impl> _impl; // owning pointer to the implementation (why copy ctor/assign are deleted)
};
Usama Arif52c54f62019-05-14 10:22:36 +0100802
/** Basic function to run @ref CLArithmeticOperationKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
 */
class CLElementwisePower : public IFunction
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Default Destructor */
    ~CLElementwisePower();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower(const CLElementwisePower &) = delete;
    /** Default move constructor */
    CLElementwisePower(CLElementwisePower &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower &operator=(const CLElementwisePower &) = delete;
    /** Default move assignment operator */
    CLElementwisePower &operator=(CLElementwisePower &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input (base). Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input (exponent). Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input (base). Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input (exponent). Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: F16/F32.
     * @param[in] output   Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // PImpl: keeps kernel/operator details out of this public header
    std::unique_ptr<Impl> _impl; // owning pointer to the implementation (why copy ctor/assign are deleted)
};
giuros01164a2722018-11-20 18:34:46 +0000862} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000863#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */