blob: 2d9d43863df512c0d39f34f1406e0d14393da620 [file] [log] [blame]
/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
25#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
giuros01164a2722018-11-20 18:34:46 +000026
Michalis Spyrouad7515d2020-07-24 00:02:23 +010027#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
28#include "arm_compute/runtime/CL/ICLOperator.h"
29#include "arm_compute/runtime/IFunction.h"
giuros01164a2722018-11-20 18:34:46 +000030
31namespace arm_compute
32{
33class ICLTensor;
34
Michalis Spyrouad7515d2020-07-24 00:02:23 +010035namespace experimental
36{
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler; // Fills tensor borders before the elementwise kernel runs
};
106
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler; // Fills tensor borders before the elementwise kernel runs
};
176
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 */
class CLArithmeticDivision : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Initialise the kernel's inputs, output.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler; // Fills tensor borders before the elementwise kernel runs
};
215
/** Basic function to run @ref CLArithmeticOperationKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler; // Fills tensor borders before the elementwise kernel runs
};
254
/** Basic function to run @ref CLArithmeticOperationKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler; // Fills tensor borders before the elementwise kernel runs
};
293
/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
 *
 * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
 */
class CLElementwiseSquaredDiff : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler; // Fills tensor borders before the elementwise kernel runs
};
332
/** Basic function to run @ref CLArithmeticOperationKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
 */
class CLElementwisePower : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: F16/F32.
     * @param[in] output   Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler; // Fills tensor borders before the elementwise kernel runs
};
371} // namespace experimental
372
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Default Destructor */
    ~CLArithmeticAddition();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition(const CLArithmeticAddition &) = delete;
    /** Default move constructor */
    CLArithmeticAddition(CLArithmeticAddition &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
    /** Default move assignment operator */
    CLArithmeticAddition &operator=(CLArithmeticAddition &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Opaque implementation (pimpl) — hides operator/tensor state from this header
    std::unique_ptr<Impl> _impl; // Owned implementation instance
};
478
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Default Destructor */
    ~CLArithmeticSubtraction();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
    /** Default move constructor */
    CLArithmeticSubtraction(CLArithmeticSubtraction &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
    /** Default move assignment operator */
    CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Opaque implementation (pimpl) — hides operator/tensor state from this header
    std::unique_ptr<Impl> _impl; // Owned implementation instance
};
584
585/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for division
586 *
587 * @note The tensor data type for the inputs must be F16/F32.
588 * @note The function performs an arithmetic division between two tensors.
589 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100590class CLArithmeticDivision : public IFunction
giuros01164a2722018-11-20 18:34:46 +0000591{
592public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100593 /** Default Constructor */
594 CLArithmeticDivision();
595 /** Default Destructor */
596 ~CLArithmeticDivision();
597 /** Prevent instances of this class from being copied (As this class contains pointers) */
598 CLArithmeticDivision(const CLArithmeticDivision &) = delete;
599 /** Default move constructor */
600 CLArithmeticDivision(CLArithmeticDivision &&);
601 /** Prevent instances of this class from being copied (As this class contains pointers) */
602 CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
603 /** Default move assignment operator */
604 CLArithmeticDivision &operator=(CLArithmeticDivision &&);
giuros01164a2722018-11-20 18:34:46 +0000605 /** Initialise the kernel's inputs, output.
606 *
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000607 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
608 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
609 * @param[in, out] input2 Second tensor input. Same as @p input1.
610 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
611 * @param[out] output Output tensor. Data types supported: Same as @p input1.
612 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000613 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000614 void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Manuel Bottini2b84be52020-04-08 10:15:51 +0100615 /** Initialise the kernel's inputs, output.
616 *
617 * @param[in] compile_context The compile context to be used.
618 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
619 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
620 * @param[in, out] input2 Second tensor input. Same as @p input1.
621 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
622 * @param[out] output Output tensor. Data types supported: Same as @p input1.
623 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
624 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100625 void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
giuros01164a2722018-11-20 18:34:46 +0000626 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
627 *
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000628 * @param[in] input1 First tensor input info. Data types supported: F16/F32.
629 * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
630 * @param[in] output Output tensor info. Data types supported: Same as @p input1.
631 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000632 *
633 * @return a status
634 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000635 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100636
637 // Inherited methods overridden:
638 void run() override;
639
640private:
641 struct Impl;
642 std::unique_ptr<Impl> _impl;
giuros01164a2722018-11-20 18:34:46 +0000643};
644
/** Basic function to run @ref CLArithmeticOperationKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Default Destructor */
    ~CLElementwiseMax();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax(const CLElementwiseMax &) = delete;
    /** Default move constructor */
    CLElementwiseMax(CLElementwiseMax &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
    /** Default move assignment operator */
    CLElementwiseMax &operator=(CLElementwiseMax &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // opaque implementation (PImpl) — keeps kernel types out of the public header
    std::unique_ptr<Impl> _impl; // owning pointer; reason copy is deleted and only move is allowed
};
704
/** Basic function to run @ref CLArithmeticOperationKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Default Destructor */
    ~CLElementwiseMin();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin(const CLElementwiseMin &) = delete;
    /** Default move constructor */
    CLElementwiseMin(CLElementwiseMin &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
    /** Default move assignment operator */
    CLElementwiseMin &operator=(CLElementwiseMin &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // opaque implementation (PImpl) — keeps kernel types out of the public header
    std::unique_ptr<Impl> _impl; // owning pointer; reason copy is deleted and only move is allowed
};
764
/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
 */
class CLElementwiseSquaredDiff : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Default Destructor */
    ~CLElementwiseSquaredDiff();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
    /** Default move constructor */
    CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
    /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // opaque implementation (PImpl) — keeps kernel types out of the public header
    std::unique_ptr<Impl> _impl; // owning pointer; reason copy is deleted and only move is allowed
};
Usama Arif52c54f62019-05-14 10:22:36 +0100824
825/** Basic function to run @ref CLArithmeticOperationKernel for power
826 *
827 * @note The tensor data type for the inputs must be F16/F32.
828 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
829 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100830class CLElementwisePower : public IFunction
Usama Arif52c54f62019-05-14 10:22:36 +0100831{
832public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100833 /** Default Constructor */
834 CLElementwisePower();
835 /** Default Destructor */
836 ~CLElementwisePower();
837 /** Prevent instances of this class from being copied (As this class contains pointers) */
838 CLElementwisePower(const CLElementwisePower &) = delete;
839 /** Default move constructor */
840 CLElementwisePower(CLElementwisePower &&);
841 /** Prevent instances of this class from being copied (As this class contains pointers) */
842 CLElementwisePower &operator=(const CLElementwisePower &) = delete;
843 /** Default move assignment operator */
844 CLElementwisePower &operator=(CLElementwisePower &&);
Usama Arif52c54f62019-05-14 10:22:36 +0100845 /** Initialise the kernel's inputs, output and conversion policy.
846 *
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000847 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
848 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
849 * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
850 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
851 * @param[out] output Output tensor. Data types supported:F16/F32.
852 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
Usama Arif52c54f62019-05-14 10:22:36 +0100853 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000854 void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Manuel Bottini2b84be52020-04-08 10:15:51 +0100855 /** Initialise the kernel's inputs, output and conversion policy.
856 *
857 * @param[in] compile_context The compile context to be used.
858 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
859 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
860 * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
861 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
862 * @param[out] output Output tensor. Data types supported:F16/F32.
863 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
864 */
865 void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Usama Arif52c54f62019-05-14 10:22:36 +0100866 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
867 *
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000868 * @param[in] input1 First tensor input info. Data types supported: F16/F32.
869 * @param[in] input2 Second tensor input info. Data types supported: F16/F32.
870 * @param[in] output Output tensor info. Data types supported: F16/F32.
871 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
Usama Arif52c54f62019-05-14 10:22:36 +0100872 *
873 * @return a status
874 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000875 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100876
877 // Inherited methods overridden:
878 void run() override;
879
880private:
881 struct Impl;
882 std::unique_ptr<Impl> _impl;
Usama Arif52c54f62019-05-14 10:22:36 +0100883};
giuros01164a2722018-11-20 18:34:46 +0000884} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000885#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */