blob: 555e84a2510ac66799e662b13ef9a10ba4371744 [file] [log] [blame]
giuros01164a2722018-11-20 18:34:46 +00001/*
Michele Di Giorgio1e0208a2021-01-22 15:42:59 +00002 * Copyright (c) 2018-2021 Arm Limited.
giuros01164a2722018-11-20 18:34:46 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
George Wort5a97b282018-12-21 16:21:04 +000020 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
giuros01164a2722018-11-20 18:34:46 +000021 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
25#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
giuros01164a2722018-11-20 18:34:46 +000026
Michalis Spyrouad7515d2020-07-24 00:02:23 +010027#include "arm_compute/runtime/CL/ICLOperator.h"
28#include "arm_compute/runtime/IFunction.h"
giuros01164a2722018-11-20 18:34:46 +000029
30namespace arm_compute
31{
32class ICLTensor;
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010033class CLCompileContext;
34class ITensorInfo;
giuros01164a2722018-11-20 18:34:46 +000035
/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Default Destructor */
    ~CLArithmeticAddition();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition(const CLArithmeticAddition &) = delete;
    /** Default move constructor */
    CLArithmeticAddition(CLArithmeticAddition &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
    /** Default move assignment operator */
    CLArithmeticAddition &operator=(CLArithmeticAddition &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |U8             |U8             |S16            |
     * |U8             |S16            |S16            |
     * |S16            |U8             |S16            |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Opaque pimpl holding the underlying operator and tensor bindings
    std::unique_ptr<Impl> _impl; // Owning pointer to the implementation (reason copies are deleted)
};
145
/** Basic function to run @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Default Destructor */
    ~CLArithmeticSubtraction();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
    /** Default move constructor */
    CLArithmeticSubtraction(CLArithmeticSubtraction &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
    /** Default move assignment operator */
    CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |U8             |U8             |S16            |
     * |U8             |S16            |S16            |
     * |S16            |U8             |S16            |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy   Policy to use to handle overflow.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in]      policy          Policy to use to handle overflow.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClSaturatedArithmeticKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8)                         -> U8
     * - (U8,U8)                         -> S16
     * - (S16,U8)                        -> S16
     * - (U8,S16)                        -> S16
     * - (S16,S16)                       -> S16
     * - (S32,S32)                       -> S32
     * - (F16,F16)                       -> F16
     * - (F32,F32)                       -> F32
     * - (QASYMM8,QASYMM8)               -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16)               -> QSYMM16
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output   Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy   Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Opaque pimpl holding the underlying operator and tensor bindings
    std::unique_ptr<Impl> _impl; // Owning pointer to the implementation (reason copies are deleted)
};
255
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for division
 *
 * NOTE(review): previous comment referenced ClSaturatedArithmeticKernel, but division exposes no
 * ConvertPolicy (saturation) parameter, matching the non-saturating ClArithmeticKernel ops below.
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 */
class CLArithmeticDivision : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Default Destructor */
    ~CLArithmeticDivision();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision(const CLArithmeticDivision &) = delete;
    /** Default move constructor */
    CLArithmeticDivision(CLArithmeticDivision &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
    /** Default move assignment operator */
    CLArithmeticDivision &operator=(CLArithmeticDivision &&);
    /** Initialise the kernel's inputs, output.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: Same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Opaque pimpl holding the underlying operator and tensor bindings
    std::unique_ptr<Impl> _impl; // Owning pointer to the implementation (reason copies are deleted)
};
324
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Default Destructor */
    ~CLElementwiseMax();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax(const CLElementwiseMax &) = delete;
    /** Default move constructor */
    CLElementwiseMax(CLElementwiseMax &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
    /** Default move assignment operator */
    CLElementwiseMax &operator=(CLElementwiseMax &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |U32            |U32            |U32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for max
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Opaque pimpl holding the underlying operator and tensor bindings
    std::unique_ptr<Impl> _impl; // Owning pointer to the implementation (reason copies are deleted)
};
400
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Default Destructor */
    ~CLElementwiseMin();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin(const CLElementwiseMin &) = delete;
    /** Default move constructor */
    CLElementwiseMin(CLElementwiseMin &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
    /** Default move assignment operator */
    CLElementwiseMin &operator=(CLElementwiseMin &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |S16            |S16            |S16            |
     * |S32            |S32            |S32            |
     * |U32            |U32            |U32            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for min
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                 // Opaque pimpl holding the underlying operator and tensor bindings
    std::unique_ptr<Impl> _impl; // Owning pointer to the implementation (reason copies are deleted)
};
476
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for squared difference
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
 */
class CLElementwiseSquaredDiff : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Default Destructor */
    ~CLElementwiseSquaredDiff();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
    /** Default move constructor */
    CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
    /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |QASYMM8        |QASYMM8        |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
     * |QSYMM16        |QSYMM16        |QSYMM16        |
     * |U8             |U8             |U8             |
     * |S16            |S16            |S16            |
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: same as @p input1.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: same as @p input1.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for squared difference
     *
     * @param[in] input1   First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output   Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                  // Opaque implementation (pimpl) hiding the OpenCL operator details
    std::unique_ptr<Impl> _impl;  // Owning pointer to the implementation; reason copy is deleted and move is provided
};
Usama Arif52c54f62019-05-14 10:22:36 +0100550
/** Basic function to run @ref opencl::kernels::ClArithmeticKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
 */
class CLElementwisePower : public IFunction
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Default Destructor */
    ~CLElementwisePower();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower(const CLElementwisePower &) = delete;
    /** Default move constructor */
    CLElementwisePower(CLElementwisePower &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower &operator=(const CLElementwisePower &) = delete;
    /** Default move assignment operator */
    CLElementwisePower &operator=(CLElementwisePower &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1           |dst            |
     * |:--------------|:--------------|:--------------|
     * |F16            |F16            |F16            |
     * |F32            |F32            |F32            |
     *
     * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   Second tensor input. Data types supported: F16/F32.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] input1          First tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second tensor input. Data types supported: F16/F32.
     *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: F16/F32.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref opencl::kernels::ClArithmeticKernel for power
     *
     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
     * @param[in] input2   Second tensor input info. Data types supported: F16/F32.
     * @param[in] output   Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;                  // Opaque implementation (pimpl) hiding the OpenCL operator details
    std::unique_ptr<Impl> _impl;  // Owning pointer to the implementation; reason copy is deleted and move is provided
};
giuros01164a2722018-11-20 18:34:46 +0000619} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000620#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */