blob: ecebac435aaa04bf64f1ec8cf7418e293df08be8 [file] [log] [blame]
giuros01164a2722018-11-20 18:34:46 +00001/*
Matthew Benthamf1aeab92023-05-30 13:35:34 +00002 * Copyright (c) 2018-2021, 2023 Arm Limited.
giuros01164a2722018-11-20 18:34:46 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
George Wort5a97b282018-12-21 16:21:04 +000020 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
giuros01164a2722018-11-20 18:34:46 +000021 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
25#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
giuros01164a2722018-11-20 18:34:46 +000026
Matthew Benthamf1aeab92023-05-30 13:35:34 +000027#include "arm_compute/core/ActivationLayerInfo.h"
Michalis Spyrouad7515d2020-07-24 00:02:23 +010028#include "arm_compute/runtime/CL/ICLOperator.h"
29#include "arm_compute/runtime/IFunction.h"
giuros01164a2722018-11-20 18:34:46 +000030
31namespace arm_compute
32{
33class ICLTensor;
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010034class CLCompileContext;
35class ITensorInfo;
giuros01164a2722018-11-20 18:34:46 +000036
/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Default Destructor */
    ~CLArithmeticAddition();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition(const CLArithmeticAddition &) = delete;
    /** Default move constructor */
    CLArithmeticAddition(CLArithmeticAddition &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
    /** Default move assignment operator */
    CLArithmeticAddition &operator=(CLArithmeticAddition &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |U8             |U8             |S16            |
     * |U8             |S16            |S16            |
     * |S16            |U8             |S16            |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // Pimpl idiom: hides the operator/tensor plumbing from this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
146
/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Default Destructor */
    ~CLArithmeticSubtraction();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
    /** Default move constructor */
    CLArithmeticSubtraction(CLArithmeticSubtraction &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
    /** Default move assignment operator */
    CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |U8             |U8             |S16            |
     * |U8             |S16            |S16            |
     * |S16            |U8             |S16            |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // Pimpl idiom: hides the operator/tensor plumbing from this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
256
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 */
class CLArithmeticDivision : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Default Destructor */
    ~CLArithmeticDivision();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision(const CLArithmeticDivision &) = delete;
    /** Default move constructor */
    CLArithmeticDivision(CLArithmeticDivision &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
    /** Default move assignment operator */
    CLArithmeticDivision &operator=(CLArithmeticDivision &&);
    /** Initialise the kernel's inputs, output.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // Pimpl idiom: hides the operator/tensor plumbing from this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
325
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Default Destructor */
    ~CLElementwiseMax();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax(const CLElementwiseMax &) = delete;
    /** Default move constructor */
    CLElementwiseMax(CLElementwiseMax &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
    /** Default move assignment operator */
    CLElementwiseMax &operator=(CLElementwiseMax &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |U32            |U32            |U32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for max
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // Pimpl idiom: hides the operator/tensor plumbing from this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
401
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Default Destructor */
    ~CLElementwiseMin();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin(const CLElementwiseMin &) = delete;
    /** Default move constructor */
    CLElementwiseMin(CLElementwiseMin &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
    /** Default move assignment operator */
    CLElementwiseMin &operator=(CLElementwiseMin &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |U32            |U32            |U32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for min
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    // Pimpl idiom: hides the operator/tensor plumbing from this public header.
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
477
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for squared difference
 *
 * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2).
 */
class CLElementwiseSquaredDiff : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Default Destructor */
    ~CLElementwiseSquaredDiff();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
    /** Default move constructor */
    CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
    /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |S16            |S16            |S16            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Forward-declared implementation (PImpl) keeping kernel details out of the header
    std::unique_ptr<Impl> _impl; // Owning pointer to the hidden implementation; reason copies are deleted above
};
Usama Arif52c54f62019-05-14 10:22:36 +0100551
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
 */
class CLElementwisePower : public IFunction
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Default Destructor */
    ~CLElementwisePower();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower(const CLElementwisePower &) = delete;
    /** Default move constructor */
    CLElementwisePower(CLElementwisePower &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower &operator=(const CLElementwisePower &) = delete;
    /** Default move assignment operator */
    CLElementwisePower &operator=(CLElementwisePower &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for power
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: F16/F32.
     * @param[in] output   Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Forward-declared implementation (PImpl) keeping kernel details out of the header
    std::unique_ptr<Impl> _impl; // Owning pointer to the hidden implementation; reason copies are deleted above
};
giuros01164a2722018-11-20 18:34:46 +0000620} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000621#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */