blob: 91cf44ff2e05f87d63f64fd7a1993541ddf56c9c [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2016-2020 Arm Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H
25#define ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/NEON/INEOperator.h"

#include <memory>
Anthony Barbier6ff3b192017-09-04 18:44:23 +010030
31namespace arm_compute
32{
33class ITensor;
Michalis Spyrouebcebf12020-10-21 00:04:14 +010034class ITensorInfo;
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035
Michalis Spyrou6eb73452020-07-02 17:39:25 +010036namespace experimental
37{
Anthony Barbier6ff3b192017-09-04 18:44:23 +010038/** Basic function to run @ref NEPixelWiseMultiplicationKernel */
Michalis Spyrou6eb73452020-07-02 17:39:25 +010039class NEPixelWiseMultiplication : public INEOperator
Anthony Barbier6ff3b192017-09-04 18:44:23 +010040{
41public:
42 /** Initialise the kernel's inputs, output and convertion policy.
43 *
Michele Di Giorgiof9b595a2020-07-03 13:34:52 +010044 * Valid configurations (Input1,Input2) -> Output :
45 *
SiCong Lid6d1b362020-09-24 17:34:23 +010046 * Support: Broadcast? Scale=1/255?
47 * - (U8,U8) -> U8, S16 N Y
48 * - (U8,S16) -> S16 N Y
49 * - (S16,U8) -> S16 N Y
50 * - (S16,S16) -> S16 N Y
51 * - (S32,S32) -> S32 Y N
52 * - (F16,F16) -> F16 N Y
53 * - (F32,F32) -> F32 Y Y
54 * - (QASYMM8,QASYMM8) -> QASYMM8 Y Y
55 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED Y Y
56 * - (QSYMM16,QSYMM16) -> QSYMM16, S32 N Y
Michele Di Giorgiof9b595a2020-07-03 13:34:52 +010057 *
Manuel Bottini79fa9a22019-02-22 17:54:22 +000058 * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
59 * For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
60 *
SiCong Libb88f892020-08-28 11:18:47 +010061 * @param[in, out] input1 First input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
Manuel Bottini79fa9a22019-02-22 17:54:22 +000062 * This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
SiCong Libb88f892020-08-28 11:18:47 +010063 * @param[in, out] input2 Second input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
Manuel Bottini79fa9a22019-02-22 17:54:22 +000064 * This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
Michele Di Giorgiof9b595a2020-07-03 13:34:52 +010065 * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
Michalis Spyrou861f0db2018-02-26 16:47:58 +000066 * @param[in] scale Scale to apply after multiplication.
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +010067 * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
SiCong Libb88f892020-08-28 11:18:47 +010068 * If both @p input1, @p input2 and @p output are of datatype S32, scale cannot be 1/255
69 * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized datatype
Michalis Spyrou861f0db2018-02-26 16:47:58 +000070 * @param[in] rounding_policy Rounding policy.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000071 * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
Anthony Barbier6ff3b192017-09-04 18:44:23 +010072 */
Michalis Spyrou6eb73452020-07-02 17:39:25 +010073 void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
Giorgio Arena8b2a7d32020-02-11 17:21:31 +000074 const ActivationLayerInfo &act_info = ActivationLayerInfo());
Ioan-Cristian Szabo754e9522017-11-28 18:29:43 +000075 /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
76 *
Michele Di Giorgiof9b595a2020-07-03 13:34:52 +010077 * Valid configurations (Input1,Input2) -> Output :
78 *
SiCong Lid6d1b362020-09-24 17:34:23 +010079 * Support: Broadcast? Scale=1/255?
80 * - (U8,U8) -> U8, S16 N Y
81 * - (U8,S16) -> S16 N Y
82 * - (S16,U8) -> S16 N Y
83 * - (S16,S16) -> S16 N Y
84 * - (S32,S32) -> S32 Y N
85 * - (F16,F16) -> F16 N Y
86 * - (F32,F32) -> F32 Y Y
87 * - (QASYMM8,QASYMM8) -> QASYMM8 Y Y
88 * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED Y Y
89 * - (QSYMM16,QSYMM16) -> QSYMM16, S32 N Y
Michele Di Giorgiof9b595a2020-07-03 13:34:52 +010090 *
Manuel Bottini79fa9a22019-02-22 17:54:22 +000091 * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
92 * For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
93 *
SiCong Libb88f892020-08-28 11:18:47 +010094 * @param[in] input1 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
95 * @param[in] input2 Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
Michele Di Giorgiof9b595a2020-07-03 13:34:52 +010096 * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
Manuel Bottini79fa9a22019-02-22 17:54:22 +000097 * @param[in] scale Scale to apply after multiplication.
98 * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
SiCong Libb88f892020-08-28 11:18:47 +010099 * If both @p input1, @p input2 and @p output are of datatype S32, scale cannot be 1/255
100 * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized datatype
Ioan-Cristian Szabo754e9522017-11-28 18:29:43 +0000101 * @param[in] rounding_policy Rounding policy.
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000102 * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
Ioan-Cristian Szabo754e9522017-11-28 18:29:43 +0000103 *
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000104 * @return a status
Ioan-Cristian Szabo754e9522017-11-28 18:29:43 +0000105 */
Giorgio Arena8b2a7d32020-02-11 17:21:31 +0000106 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
107 const ActivationLayerInfo &act_info = ActivationLayerInfo());
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100108};
giuros01154bc1c2019-03-26 17:44:40 +0000109
/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
class NEComplexPixelWiseMultiplication : public INEOperator
{
public:
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
     *
     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
};
133} // namespace experimental
134
135/** Basic function to run @ref NEPixelWiseMultiplicationKernel */
136class NEPixelWiseMultiplication : public IFunction
137{
138public:
139 /** Default Constructor */
140 NEPixelWiseMultiplication();
141 /** Default Destructor */
142 ~NEPixelWiseMultiplication();
143 /** Prevent instances of this class from being copied (As this class contains pointers) */
144 NEPixelWiseMultiplication(const NEPixelWiseMultiplication &) = delete;
145 /** Default move constructor */
146 NEPixelWiseMultiplication(NEPixelWiseMultiplication &&);
147 /** Prevent instances of this class from being copied (As this class contains pointers) */
148 NEPixelWiseMultiplication &operator=(const NEPixelWiseMultiplication &) = delete;
149 /** Default move assignment operator */
150 NEPixelWiseMultiplication &operator=(NEPixelWiseMultiplication &&);
151 /** Initialise the kernel's inputs, output and convertion policy.
152 *
153 * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
154 * For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
155 *
SiCong Libb88f892020-08-28 11:18:47 +0100156 * @param[in, out] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
Michalis Spyrou6eb73452020-07-02 17:39:25 +0100157 * This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
SiCong Libb88f892020-08-28 11:18:47 +0100158 * @param[in, out] input2 An input tensor. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, S32, QSYMM16 (only if @p input1 is QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
Michalis Spyrou6eb73452020-07-02 17:39:25 +0100159 * This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
160 * @param[out] output Output tensor. Data types supported:
161 * - U8, only if both inputs are U8.
162 * - QASYMM8, only if both inputs are QASYMM8.
163 * - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
164 * - S16.
165 * - QSYMM16, only if both inputs are QSYMM16.
SiCong Libb88f892020-08-28 11:18:47 +0100166 * - S32, only if both inputs are S32 or both are QSYMM16.
Michalis Spyrou6eb73452020-07-02 17:39:25 +0100167 * - F16, only if @p input1 is F16.
168 * - F32, only if both inputs are F32.
169 * @param[in] scale Scale to apply after multiplication.
170 * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
SiCong Libb88f892020-08-28 11:18:47 +0100171 * If both @p input1, @p input2 and @p output are of datatype S32, scale cannot be 1/255
172 * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized datatype
Michalis Spyrou6eb73452020-07-02 17:39:25 +0100173 * @param[in] rounding_policy Rounding policy.
174 * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
175 */
176 void configure(const ITensor *input1, const ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
177 const ActivationLayerInfo &act_info = ActivationLayerInfo());
178 /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
179 *
180 * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
181 * For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
182 *
SiCong Libb88f892020-08-28 11:18:47 +0100183 * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
184 * @param[in] input2 An input tensor info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, S32, QSYMM16 (only if both inputs are QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
Michalis Spyrou6eb73452020-07-02 17:39:25 +0100185 * @param[in] output Output tensor info. Data types supported:
186 * - U8, only if both inputs are U8.
187 * - QASYMM8, only if both inputs are QASYMM8.
188 * - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
189 * - S16.
190 * - QSYMM16, only if both inputs are QSYMM16.
SiCong Libb88f892020-08-28 11:18:47 +0100191 * - S32, only if both inputs are S32 or both are QSYMM16.
Michalis Spyrou6eb73452020-07-02 17:39:25 +0100192 * - F16, only if @p input1 is F16.
193 * - F32, only if both inputs are F32.
194 * @param[in] scale Scale to apply after multiplication.
195 * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
SiCong Libb88f892020-08-28 11:18:47 +0100196 * If both @p input1, @p input2 and @p output are of datatype S32, scale cannot be 1/255
197 * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized datatype
Michalis Spyrou6eb73452020-07-02 17:39:25 +0100198 * @param[in] rounding_policy Rounding policy.
199 * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
200 *
201 * @return a status
202 */
203 static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
204 const ActivationLayerInfo &act_info = ActivationLayerInfo());
205
206 // Inherited methods overridden:
207 void run() override;
208
209private:
210 struct Impl;
211 std::unique_ptr<Impl> _impl;
212};
213
/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
class NEComplexPixelWiseMultiplication : public IFunction
{
public:
    /** Default Constructor */
    NEComplexPixelWiseMultiplication();
    /** Default Destructor */
    ~NEComplexPixelWiseMultiplication();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEComplexPixelWiseMultiplication(const NEComplexPixelWiseMultiplication &) = delete;
    /** Default move constructor */
    NEComplexPixelWiseMultiplication(NEComplexPixelWiseMultiplication &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEComplexPixelWiseMultiplication &operator=(const NEComplexPixelWiseMultiplication &) = delete;
    /** Default move assignment operator */
    NEComplexPixelWiseMultiplication &operator=(NEComplexPixelWiseMultiplication &&);
    /** Initialise the kernel's inputs, output.
     *
     * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
     *
     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl; // PIMPL: hides operator/tensor-pack state from this public header
};
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100256}
Michalis Spyrouf4643372019-11-29 16:17:13 +0000257#endif /*ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H */