/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H
#define ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H

#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/runtime/CL/ICLOperator.h"
#include "arm_compute/runtime/IFunction.h"

namespace arm_compute
{
// Forward declaration
class ICLTensor;

namespace experimental
{
/** Basic function to run @ref CLPixelWiseMultiplicationKernel. */
class CLPixelWiseMultiplication : public ICLOperator
{
public:
    /** Default Constructor */
    CLPixelWiseMultiplication();
44 /** Initialise the kernel's inputs, output and convertion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,U8) -> S16
     * - (S16,S16) -> S16
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     * - (QSYMM16,QSYMM16) -> S32
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] scale Scale to apply after multiplication.
     *                  Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
                   ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplication
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,U8) -> S16
     * - (S16,S16) -> S16
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     * - (QSYMM16,QSYMM16) -> S32
     *
     * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] scale Scale to apply after multiplication.
     *                  Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                           ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler;
};
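
/* A minimal usage sketch for the experimental operator (illustrative only, not a documented
 * contract of this header): the operator is configured with ITensorInfo pointers and executed
 * with an ITensorPack carrying the actual CL tensors. The shape, data type, scale and the
 * ACL_SRC_0/ACL_SRC_1/ACL_DST pack slots below are assumptions made for the example.
 *
 *   TensorInfo a_info(TensorShape(32U, 32U), 1, DataType::F32);
 *   TensorInfo b_info(TensorShape(32U, 32U), 1, DataType::F32);
 *   TensorInfo out_info(TensorShape(32U, 32U), 1, DataType::F32);
 *
 *   experimental::CLPixelWiseMultiplication mul_op;
 *   mul_op.configure(CLKernelLibrary::get().get_compile_context(), &a_info, &b_info, &out_info,
 *                    1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
 *
 *   ITensorPack pack;                           // a, b and out are ICLTensors that the
 *   pack.add_tensor(TensorType::ACL_SRC_0, &a); // caller allocates and keeps alive
 *   pack.add_tensor(TensorType::ACL_SRC_1, &b);
 *   pack.add_tensor(TensorType::ACL_DST, &out);
 *   mul_op.run(pack);
 */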

/** Basic function to run @ref CLComplexPixelWiseMultiplicationKernel. */
class CLComplexPixelWiseMultiplication : public ICLOperator
{
public:
    /** Default Constructor */
    CLComplexPixelWiseMultiplication();
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplication
     *
     * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
     * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;

private:
    CLFillBorderKernel _border_handler;
};
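
/* The complex operator follows the same configure/run pattern as the sketch above, but takes
 * 2-channel F32 tensor infos (commonly interpreted as interleaved real/imaginary parts) and no
 * scale or policy arguments; again an illustrative sketch, not a documented contract:
 *
 *   TensorInfo a_info(TensorShape(16U, 16U), 2, DataType::F32);
 *   TensorInfo b_info(TensorShape(16U, 16U), 2, DataType::F32);
 *   TensorInfo out_info(TensorShape(16U, 16U), 2, DataType::F32);
 *
 *   experimental::CLComplexPixelWiseMultiplication cmul_op;
 *   cmul_op.configure(CLKernelLibrary::get().get_compile_context(), &a_info, &b_info, &out_info);
 *   cmul_op.run(pack); // pack filled with ACL_SRC_0 / ACL_SRC_1 / ACL_DST as before
 */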
} // namespace experimental

/** Basic function to run @ref CLPixelWiseMultiplicationKernel. */
class CLPixelWiseMultiplication : public IFunction
{
public:
    /** Default Constructor */
    CLPixelWiseMultiplication();
    /** Default Destructor */
    ~CLPixelWiseMultiplication();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLPixelWiseMultiplication(const CLPixelWiseMultiplication &) = delete;
    /** Default move constructor */
    CLPixelWiseMultiplication(CLPixelWiseMultiplication &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLPixelWiseMultiplication &operator=(const CLPixelWiseMultiplication &) = delete;
    /** Default move assignment operator */
    CLPixelWiseMultiplication &operator=(CLPixelWiseMultiplication &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,U8) -> S16
     * - (S16,S16) -> S16
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     * - (QSYMM16,QSYMM16) -> S32
     *
     * @param[in, out] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] scale Scale to apply after multiplication.
     *                  Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, float scale,
                   ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,U8) -> S16
     * - (S16,S16) -> S16
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     * - (QSYMM16,QSYMM16) -> S32
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] scale Scale to apply after multiplication.
     *                  Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, float scale,
                   ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplication
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,U8) -> S16
     * - (S16,S16) -> S16
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     * - (QSYMM16,QSYMM16) -> S32
     *
     * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] scale Scale to apply after multiplication.
     *                  Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                           ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
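
/* A minimal end-to-end sketch for the runtime function (shapes, data type and scale are
 * illustrative assumptions; scale = 1.f satisfies the 1/2^n constraint with n = 0):
 *
 *   CLScheduler::get().default_init();
 *
 *   CLTensor a, b, out;
 *   a.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::F32));
 *   b.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::F32));
 *   out.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::F32));
 *
 *   // Optional up-front check with the static validator
 *   Status s = CLPixelWiseMultiplication::validate(a.info(), b.info(), out.info(), 1.f,
 *                                                  ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
 *
 *   CLPixelWiseMultiplication mul;
 *   mul.configure(&a, &b, &out, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
 *
 *   a.allocator()->allocate();
 *   b.allocator()->allocate();
 *   out.allocator()->allocate();
 *   // ... map and fill a and b, then:
 *   mul.run();
 *   CLScheduler::get().sync();
 */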

/** Basic function to run @ref CLComplexPixelWiseMultiplicationKernel. */
class CLComplexPixelWiseMultiplication : public IFunction
{
public:
    /** Default Constructor */
    CLComplexPixelWiseMultiplication();
    /** Default Destructor */
    ~CLComplexPixelWiseMultiplication();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLComplexPixelWiseMultiplication(const CLComplexPixelWiseMultiplication &) = delete;
    /** Default move constructor */
    CLComplexPixelWiseMultiplication(CLComplexPixelWiseMultiplication &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLComplexPixelWiseMultiplication &operator=(const CLComplexPixelWiseMultiplication &) = delete;
    /** Default move assignment operator */
    CLComplexPixelWiseMultiplication &operator=(CLComplexPixelWiseMultiplication &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 An input tensor. Data types supported: F32. Number of channels supported: 2.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output The output tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplication
     *
     * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
     * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
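
/* Complex multiplication expects the operands packed as 2-channel F32 tensors (channel 0 and
 * channel 1 are commonly interpreted as the real and imaginary parts). A sketch with assumed
 * shapes, following the same allocate/fill/run flow as the example above:
 *
 *   CLTensor a, b, out;
 *   a.allocator()->init(TensorInfo(TensorShape(16U, 16U), 2, DataType::F32));
 *   b.allocator()->init(TensorInfo(TensorShape(16U, 16U), 2, DataType::F32));
 *   out.allocator()->init(TensorInfo(TensorShape(16U, 16U), 2, DataType::F32));
 *
 *   CLComplexPixelWiseMultiplication cmul;
 *   cmul.configure(&a, &b, &out);
 *   // allocate and fill a and b, then:
 *   cmul.run();
 */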
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H */