blob: 2b291517f353281d5d90d9bc89c991953518141a [file] [log] [blame]
giuros01164a2722018-11-20 18:34:46 +00001/*
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +00002 * Copyright (c) 2018-2021 Arm Limited.
giuros01164a2722018-11-20 18:34:46 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
George Wort5a97b282018-12-21 16:21:04 +000020 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
giuros01164a2722018-11-20 18:34:46 +000021 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
25#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
giuros01164a2722018-11-20 18:34:46 +000026
Michalis Spyrouad7515d2020-07-24 00:02:23 +010027#include "arm_compute/runtime/CL/ICLOperator.h"
28#include "arm_compute/runtime/IFunction.h"
giuros01164a2722018-11-20 18:34:46 +000029
30namespace arm_compute
31{
32class ICLTensor;
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010033class CLCompileContext;
34class ITensorInfo;
giuros01164a2722018-11-20 18:34:46 +000035
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +000036/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
Michalis Spyrouad7515d2020-07-24 00:02:23 +010037 *
38 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
39 * @note The function performs an arithmetic addition between two tensors.
40 */
41class CLArithmeticAddition : public IFunction
42{
43public:
44 /** Default Constructor */
45 CLArithmeticAddition();
46 /** Default Destructor */
47 ~CLArithmeticAddition();
48 /** Prevent instances of this class from being copied (As this class contains pointers) */
49 CLArithmeticAddition(const CLArithmeticAddition &) = delete;
50 /** Default move constructor */
51 CLArithmeticAddition(CLArithmeticAddition &&);
52 /** Prevent instances of this class from being copied (As this class contains pointers) */
53 CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
54 /** Default move assignment operator */
55 CLArithmeticAddition &operator=(CLArithmeticAddition &&);
giuros01164a2722018-11-20 18:34:46 +000056 /** Initialise the kernel's inputs, output and conversion policy.
57 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010058 * Valid configurations (Input1,Input2) -> Output :
59 *
60 * - (U8,U8) -> U8
61 * - (U8,U8) -> S16
62 * - (S16,U8) -> S16
63 * - (U8,S16) -> S16
64 * - (S16,S16) -> S16
65 * - (S32,S32) -> S32
66 * - (F16,F16) -> F16
67 * - (F32,F32) -> F32
68 * - (QASYMM8,QASYMM8) -> QASYMM8
69 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
70 * - (QSYMM16,QSYMM16) -> QSYMM16
71 *
72 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000073 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010074 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000075 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010076 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000077 * @param[in] policy Policy to use to handle overflow.
78 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +000079 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000080 void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Manuel Bottini2b84be52020-04-08 10:15:51 +010081 /** Initialise the kernel's inputs, output and conversion policy.
82 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010083 * Valid configurations (Input1,Input2) -> Output :
84 *
85 * - (U8,U8) -> U8
86 * - (U8,U8) -> S16
87 * - (S16,U8) -> S16
88 * - (U8,S16) -> S16
89 * - (S16,S16) -> S16
90 * - (S32,S32) -> S32
91 * - (F16,F16) -> F16
92 * - (F32,F32) -> F32
93 * - (QASYMM8,QASYMM8) -> QASYMM8
94 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
95 * - (QSYMM16,QSYMM16) -> QSYMM16
96 *
Manuel Bottini2b84be52020-04-08 10:15:51 +010097 * @param[in] compile_context The compile context to be used.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +010098 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +010099 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100100 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100101 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100102 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100103 * @param[in] policy Policy to use to handle overflow.
104 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
105 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100106 void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
107 const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000108 /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100109 *
110 * Valid configurations (Input1,Input2) -> Output :
111 *
112 * - (U8,U8) -> U8
113 * - (U8,U8) -> S16
114 * - (S16,U8) -> S16
115 * - (U8,S16) -> S16
116 * - (S16,S16) -> S16
117 * - (S32,S32) -> S32
118 * - (F16,F16) -> F16
119 * - (F32,F32) -> F32
120 * - (QASYMM8,QASYMM8) -> QASYMM8
121 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
122 * - (QSYMM16,QSYMM16) -> QSYMM16
123 *
124 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
125 * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
126 * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
127 * @param[in] policy Policy to use to handle overflow.
128 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
129 *
130 * @return a status
131 */
132 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
133
134 // Inherited methods overridden:
135 void run() override;
136
137private:
138 struct Impl;
139 std::unique_ptr<Impl> _impl;
140};
141
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000142/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100143 *
144 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32.
145 * @note The function performs an arithmetic subtraction between two tensors.
146 */
147class CLArithmeticSubtraction : public IFunction
148{
149public:
150 /** Default Constructor */
151 CLArithmeticSubtraction();
152 /** Default Destructor */
153 ~CLArithmeticSubtraction();
154 /** Prevent instances of this class from being copied (As this class contains pointers) */
155 CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
156 /** Default move constructor */
157 CLArithmeticSubtraction(CLArithmeticSubtraction &&);
158 /** Prevent instances of this class from being copied (As this class contains pointers) */
159 CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
160 /** Default move assignment operator */
161 CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
162 /** Initialise the kernel's inputs, output and conversion policy.
163 *
164 * Valid configurations (Input1,Input2) -> Output :
165 *
166 * - (U8,U8) -> U8
167 * - (U8,U8) -> S16
168 * - (S16,U8) -> S16
169 * - (U8,S16) -> S16
170 * - (S16,S16) -> S16
171 * - (S32,S32) -> S32
172 * - (F16,F16) -> F16
173 * - (F32,F32) -> F32
174 * - (QASYMM8,QASYMM8) -> QASYMM8
175 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
176 * - (QSYMM16,QSYMM16) -> QSYMM16
177 *
178 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
179 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
180 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
181 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
182 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
183 * @param[in] policy Policy to use to handle overflow.
184 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
185 */
186 void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
187 /** Initialise the kernel's inputs, output and conversion policy.
188 *
189 * Valid configurations (Input1,Input2) -> Output :
190 *
191 * - (U8,U8) -> U8
192 * - (U8,U8) -> S16
193 * - (S16,U8) -> S16
194 * - (U8,S16) -> S16
195 * - (S16,S16) -> S16
196 * - (S32,S32) -> S32
197 * - (F16,F16) -> F16
198 * - (F32,F32) -> F32
199 * - (QASYMM8,QASYMM8) -> QASYMM8
200 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
201 * - (QSYMM16,QSYMM16) -> QSYMM16
202 *
203 * @param[in] compile_context The compile context to be used.
204 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
205 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
206 * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
207 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
208 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
209 * @param[in] policy Policy to use to handle overflow.
210 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
211 */
212 void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
213 const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000214 /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
giuros01164a2722018-11-20 18:34:46 +0000215 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100216 * Valid configurations (Input1,Input2) -> Output :
217 *
218 * - (U8,U8) -> U8
219 * - (U8,U8) -> S16
220 * - (S16,U8) -> S16
221 * - (U8,S16) -> S16
222 * - (S16,S16) -> S16
223 * - (S32,S32) -> S32
224 * - (F16,F16) -> F16
225 * - (F32,F32) -> F32
226 * - (QASYMM8,QASYMM8) -> QASYMM8
227 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
228 * - (QSYMM16,QSYMM16) -> QSYMM16
229 *
230 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
231 * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
232 * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000233 * @param[in] policy Policy to use to handle overflow.
234 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000235 *
236 * @return a status
237 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000238 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100239
240 // Inherited methods overridden:
241 void run() override;
242
243private:
244 struct Impl;
245 std::unique_ptr<Impl> _impl;
giuros01164a2722018-11-20 18:34:46 +0000246};
247
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000248/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for division
giuros01164a2722018-11-20 18:34:46 +0000249 *
250 * @note The tensor data type for the inputs must be F16/F32.
251 * @note The function performs an arithmetic division between two tensors.
252 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100253class CLArithmeticDivision : public IFunction
giuros01164a2722018-11-20 18:34:46 +0000254{
255public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100256 /** Default Constructor */
257 CLArithmeticDivision();
258 /** Default Destructor */
259 ~CLArithmeticDivision();
260 /** Prevent instances of this class from being copied (As this class contains pointers) */
261 CLArithmeticDivision(const CLArithmeticDivision &) = delete;
262 /** Default move constructor */
263 CLArithmeticDivision(CLArithmeticDivision &&);
264 /** Prevent instances of this class from being copied (As this class contains pointers) */
265 CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
266 /** Default move assignment operator */
267 CLArithmeticDivision &operator=(CLArithmeticDivision &&);
giuros01164a2722018-11-20 18:34:46 +0000268 /** Initialise the kernel's inputs, output.
269 *
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000270 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
271 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
272 * @param[in, out] input2 Second tensor input. Same as @p input1.
273 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
274 * @param[out] output Output tensor. Data types supported: Same as @p input1.
275 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000276 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000277 void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Manuel Bottini2b84be52020-04-08 10:15:51 +0100278 /** Initialise the kernel's inputs, output.
279 *
280 * @param[in] compile_context The compile context to be used.
281 * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
282 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
283 * @param[in, out] input2 Second tensor input. Same as @p input1.
284 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
285 * @param[out] output Output tensor. Data types supported: Same as @p input1.
286 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
287 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100288 void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
giuros01164a2722018-11-20 18:34:46 +0000289 /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
290 *
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000291 * @param[in] input1 First tensor input info. Data types supported: F16/F32.
292 * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
293 * @param[in] output Output tensor info. Data types supported: Same as @p input1.
294 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000295 *
296 * @return a status
297 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000298 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100299
300 // Inherited methods overridden:
301 void run() override;
302
303private:
304 struct Impl;
305 std::unique_ptr<Impl> _impl;
giuros01164a2722018-11-20 18:34:46 +0000306};
307
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000308/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for max
giuros01164a2722018-11-20 18:34:46 +0000309 *
Michele Di Giorgio6997fc92019-06-18 10:23:22 +0100310 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
giuros01164a2722018-11-20 18:34:46 +0000311 * @note The function performs a max operation between two tensors.
312 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100313class CLElementwiseMax : public IFunction
giuros01164a2722018-11-20 18:34:46 +0000314{
315public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100316 /** Default Constructor */
317 CLElementwiseMax();
318 /** Default Destructor */
319 ~CLElementwiseMax();
320 /** Prevent instances of this class from being copied (As this class contains pointers) */
321 CLElementwiseMax(const CLElementwiseMax &) = delete;
322 /** Default move constructor */
323 CLElementwiseMax(CLElementwiseMax &&);
324 /** Prevent instances of this class from being copied (As this class contains pointers) */
325 CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
326 /** Default move assignment operator */
327 CLElementwiseMax &operator=(CLElementwiseMax &&);
giuros01164a2722018-11-20 18:34:46 +0000328 /** Initialise the kernel's inputs, output and conversion policy.
329 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100330 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000331 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100332 * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000333 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100334 * @param[out] output Output tensor. Data types supported: same as @p input1.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000335 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000336 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000337 void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Manuel Bottini2b84be52020-04-08 10:15:51 +0100338 /** Initialise the kernel's inputs, output and conversion policy.
339 *
340 * @param[in] compile_context The compile context to be used.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100341 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100342 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100343 * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100344 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100345 * @param[out] output Output tensor. Data types supported: same as @p input1.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100346 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
347 */
348 void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000349 /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for max
giuros01164a2722018-11-20 18:34:46 +0000350 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100351 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
352 * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
353 * @param[in] output Output tensor info. Data types supported: same as @p input1.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000354 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000355 *
356 * @return a status
357 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000358 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100359
360 // Inherited methods overridden:
361 void run() override;
362
363private:
364 struct Impl;
365 std::unique_ptr<Impl> _impl;
giuros01164a2722018-11-20 18:34:46 +0000366};
367
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000368/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for min
giuros01164a2722018-11-20 18:34:46 +0000369 *
Michele Di Giorgio6997fc92019-06-18 10:23:22 +0100370 * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32.
giuros01164a2722018-11-20 18:34:46 +0000371 * @note The function performs a max operation between two tensors.
372 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100373class CLElementwiseMin : public IFunction
giuros01164a2722018-11-20 18:34:46 +0000374{
375public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100376 /** Default Constructor */
377 CLElementwiseMin();
378 /** Default Destructor */
379 ~CLElementwiseMin();
380 /** Prevent instances of this class from being copied (As this class contains pointers) */
381 CLElementwiseMin(const CLElementwiseMin &) = delete;
382 /** Default move constructor */
383 CLElementwiseMin(CLElementwiseMin &&);
384 /** Prevent instances of this class from being copied (As this class contains pointers) */
385 CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
386 /** Default move assignment operator */
387 CLElementwiseMin &operator=(CLElementwiseMin &&);
giuros01164a2722018-11-20 18:34:46 +0000388 /** Initialise the kernel's inputs, output and conversion policy.
389 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100390 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000391 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100392 * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000393 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100394 * @param[out] output Output tensor. Data types supported: same as @p input1.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000395 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000396 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000397 void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Manuel Bottini2b84be52020-04-08 10:15:51 +0100398 /** Initialise the kernel's inputs, output and conversion policy.
399 *
400 * @param[in] compile_context The compile context to be used.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100401 * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100402 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100403 * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100404 * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100405 * @param[out] output Output tensor. Data types supported: same as @p input1.
Manuel Bottini2b84be52020-04-08 10:15:51 +0100406 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
407 */
408 void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000409 /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for min
giuros01164a2722018-11-20 18:34:46 +0000410 *
Michele Di Giorgiof6f78762020-07-06 11:27:21 +0100411 * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
412 * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
413 * @param[in] output Output tensor info. Data types supported: same as @p input1.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000414 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
giuros01164a2722018-11-20 18:34:46 +0000415 *
416 * @return a status
417 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000418 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100419
420 // Inherited methods overridden:
421 void run() override;
422
423private:
424 struct Impl;
425 std::unique_ptr<Impl> _impl;
giuros01164a2722018-11-20 18:34:46 +0000426};
427
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +0000428/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for squared difference
giuros01164a2722018-11-20 18:34:46 +0000429 *
Michele Di Giorgio6997fc92019-06-18 10:23:22 +0100430 * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32.
giuros01164a2722018-11-20 18:34:46 +0000431 * @note The function performs a squared different operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2
432 */
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100433class CLElementwiseSquaredDiff : public IFunction
giuros01164a2722018-11-20 18:34:46 +0000434{
435public:
Michalis Spyrouad7515d2020-07-24 00:02:23 +0100436 /** Default Constructor */
437 CLElementwiseSquaredDiff();
438 /** Default Destructor */
439 ~CLElementwiseSquaredDiff();
440 /** Prevent instances of this class from being copied (As this class contains pointers) */
441 CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
442 /** Default move constructor */
443 CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
444 /** Prevent instances of this class from being copied (As this class contains pointers) */
445 CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
446 /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // Opaque implementation (pimpl idiom): keeps kernel/operator details out of this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
Usama Arif52c54f62019-05-14 10:22:36 +0100487
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
 */
class CLElementwisePower : public IFunction
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Default Destructor */
    ~CLElementwisePower();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower(const CLElementwisePower &) = delete;
    /** Default move constructor */
    CLElementwisePower(CLElementwisePower &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower &operator=(const CLElementwisePower &) = delete;
    /** Default move assignment operator */
    CLElementwisePower &operator=(CLElementwisePower &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for power
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: F16/F32.
     * @param[in] output   Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // Opaque implementation (pimpl idiom): keeps kernel/operator details out of this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
giuros01164a2722018-11-20 18:34:46 +0000547} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000548#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */