blob: 1ce37d31c18d6e9ef318ea7db6e94990d7d4aed8 [file] [log] [blame]
/*
 * Copyright (c) 2019-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +000024#ifndef ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS
25#define ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS
Georgios Pinitas0bc78492019-03-18 20:07:37 +000026
Sang-Hoon Parkc2617982020-05-20 22:13:47 +010027#include "arm_compute/core/PixelValue.h"
Gian Marco Iodiceca1f4602019-07-16 15:46:48 +010028#include "arm_compute/core/Types.h"
Matthew Benthamf1aeab92023-05-30 13:35:34 +000029#include "arm_compute/core/ActivationLayerInfo.h"
SiCongLi1af54162021-10-06 15:25:57 +010030#include "arm_compute/core/experimental/IPostOp.h"
Gian Marco Iodiceca1f4602019-07-16 15:46:48 +010031
Georgios Pinitas0bc78492019-03-18 20:07:37 +000032namespace arm_compute
33{
/** Descriptor for FFT scale kernels */
struct FFTScaleKernelInfo
{
    float scale{ 0.f };      /**< Scaling factor applied to each element. */
    bool  conjugate{ true }; /**< Flag to conjugate the output. */
};
40
/** Descriptor for FFT digit reverse kernels */
struct FFTDigitReverseKernelInfo
{
    unsigned int axis{ 0 };          /**< Axis to perform the kernel on. */
    bool         conjugate{ false }; /**< Flag to conjugate the output. */
};
47
/** Descriptor used by the FFT core kernels */
struct FFTRadixStageKernelInfo
{
    unsigned int axis{ 0 };              /**< Axis to run the kernel on. */
    unsigned int radix{ 0 };             /**< Radix to use. */
    unsigned int Nx{ 0 };                /**< Nx coefficient. */
    bool         is_first_stage{ false }; /**< Flag to indicate if the kernel is the first stage of a decomposed FFT. */
};
Gian Marco Iodice7026b302019-06-26 17:18:11 +010056
class ITensorInfo;
/** Descriptor used by the GEMM kernels */
struct GEMMKernelInfo
{
    GEMMKernelInfo() = default;
    /** Constructor
     *
     * Initializes every field except @ref output_stage, which cannot be set
     * through this constructor and must be assigned separately after construction.
     */
    GEMMKernelInfo(
        unsigned int im,
        unsigned int in,
        unsigned int ik,
        unsigned int idepth_output_gemm3d,
        bool ireinterpret_input_as_3d,
        bool ibroadcast_bias,
        bool ifp_mixed_precision,
        bool ihas_pad_y,
        ActivationLayerInfo iactivation_info,
        int inmult_transpose1xW_width,
        int imult_interleave4x4_height,
        GEMMLHSMatrixInfo ilhs_info,
        GEMMRHSMatrixInfo irhs_info,
        int32_t ina_offset,
        int32_t inb_offset,
        const experimental::PostOpList<ITensorInfo *> &ipost_ops = experimental::PostOpList<ITensorInfo *> {})
        : m(im), n(in), k(ik), depth_output_gemm3d(idepth_output_gemm3d), reinterpret_input_as_3d(ireinterpret_input_as_3d), broadcast_bias(ibroadcast_bias), fp_mixed_precision(ifp_mixed_precision),
          has_pad_y(ihas_pad_y), activation_info(iactivation_info), mult_transpose1xW_width(inmult_transpose1xW_width), mult_interleave4x4_height(imult_interleave4x4_height), lhs_info(ilhs_info),
          rhs_info(irhs_info), a_offset(ina_offset), b_offset(inb_offset), post_ops(ipost_ops)
    {
    }

    unsigned int m{ 0 };                           /**< Number of LHS rows */
    unsigned int n{ 0 };                           /**< Number of RHS columns */
    unsigned int k{ 0 };                           /**< Number of LHS columns or RHS rows */
    unsigned int depth_output_gemm3d{ 0 };         /**< Depth of the output tensor in case it is reinterpreted as 3D */
    bool reinterpret_input_as_3d{ false };         /**< Flag used to reinterpret the input as 3D */
    bool broadcast_bias{ false };                  /**< Flag used to broadcast the bias addition */
    bool fp_mixed_precision{ false };              /**< Flag used to indicate wider accumulators (32 bit instead of 16 for FP16). */
    bool has_pad_y{ false };                       /**< Flag used to indicate if the input/output tensors have internal pad on the y direction */
    ActivationLayerInfo activation_info{};         /**< Activation function to perform after the matrix multiplication */
    int mult_transpose1xW_width{ 1 };              /**< Multiplication factor for the width of the 1xW transposed block */
    int mult_interleave4x4_height{ 1 };            /**< Multiplication factor for the height of the 4x4 interleaved block */
    GEMMLHSMatrixInfo lhs_info{};                  /**< LHS matrix information used to retrieve the number of rows processed by each thread */
    GEMMRHSMatrixInfo rhs_info{};                  /**< RHS matrix information used for reshaping the RHS matrix */
    int32_t a_offset{ 0 };                         /**< Offset to be added to each element of the matrix A */
    int32_t b_offset{ 0 };                         /**< Offset to be added to each element of the matrix B */
    GEMMLowpOutputStageInfo output_stage{};        /**< GEMMLowp output stage information */
    experimental::PostOpList<ITensorInfo *> post_ops{}; /**< (EXPERIMENTAL_POST_OPS) Specifies a list of post ops to be fused after the main op. Note unsupported post ops would not be executed.
                                                         *   If specified, automatically disable the @ref activation_info */
};
Gian Marco Iodice9285adb2019-09-05 16:10:27 +0100104
/** Compute descriptor used by the depthwise convolution native kernel */
struct DWCComputeKernelInfo
{
    unsigned int n0{ 1 };                             /**< Number of columns processed by each thread */
    unsigned int m0{ 1 };                             /**< Number of rows processed by each thread */
    bool         export_input_to_cl_image{ false };   /**< Export input to cl_image */
    bool         export_weights_to_cl_image{ false }; /**< Export the weights to cl_image */
};
Sang-Hoon Park62eeb532019-10-29 13:13:19 +0000113
/** Compute descriptor used by the direct convolution kernel */
struct DirectConvComputeKernelInfo
{
    int32_t m0{ 1 };                           /**< Number of rows to be processed by the kernel */
    int32_t n0{ 1 };                           /**< Number of columns to be processed by the kernel */
    int32_t k0{ 1 };                           /**< Number of partial accumulations to be processed in a single iteration by the kernel */
    bool    export_weights_to_cl_image{ false }; /**< Flag to export the weights to cl_image */
    bool    export_output_to_cl_image{ false };  /**< Flag to export the output to cl_image */
    bool    export_input_to_cl_image{ false };   /**< Flag to export the input to cl_image */
};
124
/** Descriptor used by the softmax kernels */
struct SoftmaxKernelInfo
{
    float    beta{ 1.f };                          /**< A scaling factor for the exponent with default value 1.0 */
    bool     is_log{ false };                      /**< Flag used to perform Log Softmax operation */
    DataType input_data_type{ DataType::UNKNOWN }; /**< Input tensor data type */
    int32_t  axis{ 0 };                            /**< The dimension in which to apply softmax. */
};
Michele Di Giorgio45361932019-12-19 13:53:44 +0000133
/** Descriptor used by the direct convolution layer output stage kernels */
struct DirectConvolutionLayerOutputStageKernelInfo
{
    int32_t  result_fixedpoint_multiplier{ 0 };     /**< Result output stage multiplier used for quantizing */
    int32_t  result_shift{ 0 };                     /**< Result output stage shift used for quantizing */
    int32_t  result_offset_after_shift{ 0 };        /**< Result offset used for quantizing */
    DataType output_data_type{ DataType::UNKNOWN }; /**< Output tensor data type to use if the output is not initialized */
};
Georgios Pinitas55a687d2020-01-30 12:00:23 +0000142
/** Descriptor used by the instance normalization kernels */
struct InstanceNormalizationLayerKernelInfo
{
    /** Default constructor: gamma = 1.0, beta = 0.0, epsilon = 1e-12, mixed precision enabled.
     *
     * Defaults live in the in-class member initializers below (consistent with the
     * other descriptors in this file) instead of being duplicated in a delegating
     * constructor and in the member comments.
     */
    InstanceNormalizationLayerKernelInfo() = default;
    /** Constructor
     *
     * @param[in] gamma               The scale scalar value applied to the normalized tensor.
     * @param[in] beta                The offset scalar value applied to the normalized tensor.
     * @param[in] epsilon             Lower bound value for the normalization.
     * @param[in] use_mixed_precision Use mixed precision in case of FP16 execution.
     */
    InstanceNormalizationLayerKernelInfo(float gamma, float beta, float epsilon, bool use_mixed_precision)
        : gamma(gamma), beta(beta), epsilon(epsilon), use_mixed_precision(use_mixed_precision)
    {
    }

    float gamma{ 1.f };               /**< The scale scalar value applied to the normalized tensor. */
    float beta{ 0.f };                /**< The offset scalar value applied to the normalized tensor. */
    float epsilon{ 1e-12f };          /**< Lower bound value for the normalization. */
    bool  use_mixed_precision{ true }; /**< Use mixed precision in case of FP16 execution. */
};
Michele Di Giorgioa602f032020-03-12 19:34:33 +0000167
/** Descriptor used by the GEMMLowp reduction kernels */
struct GEMMLowpReductionKernelInfo
{
    /** Default constructor */
    GEMMLowpReductionKernelInfo() = default;
    /** Constructor
     *
     * @param[in] k             Number of matrix columns/rows.
     * @param[in] is_reshaped   True if the input tensor has been reshaped.
     * @param[in] scalar        Scalar value to multiply each reduced column/row by.
     * @param[in] mul_by_scalar True if each column/row reduction has to be multiplied by a scalar value.
     */
    GEMMLowpReductionKernelInfo(int32_t k, bool is_reshaped, int32_t scalar, bool mul_by_scalar)
        : k(k), is_reshaped(is_reshaped), scalar(scalar), mul_by_scalar(mul_by_scalar)
    {
    }

    int32_t k{ 0 };               /**< Number of matrix columns/rows */
    bool    is_reshaped{ false }; /**< True if the input tensor has been reshaped */
    int32_t scalar{ 0 };          /**< Scalar value to multiply each reduced column/row by */
    bool    mul_by_scalar{ false }; /**< True if each column/row reduction has to be multiplied by a scalar value */
};
Sang-Hoon Parkc2617982020-05-20 22:13:47 +0100189
/** Descriptor used by the scale kernels */
struct ScaleKernelInfo
{
    /** Constructor
     *
     * @param[in] interpolation_policy  Interpolation type to use
     * @param[in] border_mode           Border mode policy
     * @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT. Defaults to default @ref PixelValue
     * @param[in] sampling_policy       (Optional) Sampling policy used by the interpolation. Defaults to @ref SamplingPolicy::CENTER
     * @param[in] use_padding           (Optional) Is padding in use or not. Defaults to true.
     * @param[in] align_corners         (Optional) Align corners of input and output, only affecting bilinear policy with TOP_LEFT sampling policy. Defaults to false.
     * @param[in] data_layout           (Optional) Data layout used by the layer. Defaults to @ref DataLayout::UNKNOWN
     */
    ScaleKernelInfo(InterpolationPolicy interpolation_policy,
                    BorderMode          border_mode,
                    PixelValue          constant_border_value = PixelValue(),
                    SamplingPolicy      sampling_policy       = SamplingPolicy::CENTER,
                    bool                use_padding           = true,
                    bool                align_corners         = false,
                    DataLayout          data_layout           = DataLayout::UNKNOWN) noexcept
        : interpolation_policy{ interpolation_policy },
          border_mode{ border_mode },
          constant_border_value{ constant_border_value },
          sampling_policy{ sampling_policy },
          use_padding{ use_padding },
          align_corners{ align_corners },
          data_layout{ data_layout }
    {
    }

    InterpolationPolicy interpolation_policy;  /**< Interpolation type to use */
    BorderMode          border_mode;           /**< Border mode policy */
    PixelValue          constant_border_value; /**< Constant value to use for constant border mode policy */
    SamplingPolicy      sampling_policy;       /**< Sampling policy used by the interpolation. */
    bool                use_padding;           /**< Indication of using padding */
    bool                align_corners;         /**< Align corners of input and output */
    DataLayout          data_layout;           /**< Data layout to use */
};
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000227
/** Descriptor used by the matrix multiplication kernels */
struct MatMulKernelInfo
{
    MatMulKernelInfo() = default;
    /** Constructor
     *
     * @param[in] adj_lhs                Flag to transpose (adjoint) the LHS tensor before the multiplication.
     * @param[in] adj_rhs                Flag to transpose (adjoint) the RHS tensor before the multiplication.
     * @param[in] m0                     (Optional) Number of output rows processed by each work-item. Defaults to 1.
     * @param[in] n0                     (Optional) Number of output columns processed by each work-item. Defaults to 1.
     * @param[in] k0                     (Optional) Number of inner accumulations. Defaults to 1.
     * @param[in] export_rhs_to_cl_image (Optional) Flag to export the RHS tensor to cl_image. Defaults to false.
     */
    MatMulKernelInfo(bool adj_lhs, bool adj_rhs, int m0 = 1, int n0 = 1, int k0 = 1, bool export_rhs_to_cl_image = false)
        : adj_lhs{ adj_lhs }, adj_rhs{ adj_rhs }, m0{ m0 }, n0{ n0 }, k0{ k0 }, export_rhs_to_cl_image{ export_rhs_to_cl_image }
    {
    }
    bool adj_lhs{ false };                /**< Adjoint LHS flag: transpose the LHS tensor before the multiplication */
    bool adj_rhs{ false };                /**< Adjoint RHS flag: transpose the RHS tensor before the multiplication */
    int  m0{ 1 };                         /**< Number of output rows processed by each work-item */
    int  n0{ 1 };                         /**< Number of output columns processed by each work-item */
    int  k0{ 1 };                         /**< Number of inner accumulations */
    bool export_rhs_to_cl_image{ false }; /**< Flag to know whether the RHS tensor should be exported to cl_image */
};
Georgios Pinitas0bc78492019-03-18 20:07:37 +0000242} // namespace arm_compute
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000243#endif /* ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS */