/*
 * Copyright (c) 2019-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS_H
#define ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS_H

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/function_info/ActivationLayerInfo.h"

namespace arm_compute
{
/** Descriptor for FFT scale kernels */
struct FFTScaleKernelInfo
{
    float scale{ 0.f };      /**< Scaling factor applied by the kernel. */
    bool  conjugate{ true }; /**< Flag to conjugate the output. */
};
39
/** Descriptor for FFT digit reverse kernels */
struct FFTDigitReverseKernelInfo
{
    unsigned int axis{ 0 };          /**< Axis to perform the kernel on. */
    bool         conjugate{ false }; /**< Flag to conjugate the output. */
};
46
/** Descriptor used by the FFT core kernels */
struct FFTRadixStageKernelInfo
{
    unsigned int axis{ 0 };               /**< Axis to run the kernel on. */
    unsigned int radix{ 0 };              /**< Radix to use. */
    unsigned int Nx{ 0 };                 /**< Nx coefficient. */
    bool         is_first_stage{ false }; /**< Flags if the FFT kernel is the first stage of a decomposed FFT. */
};
Gian Marco Iodice7026b302019-06-26 17:18:11 +010055
SiCongLi1af54162021-10-06 15:25:57 +010056class ITensorInfo;
Gian Marco Iodice7026b302019-06-26 17:18:11 +010057/** Descriptor used by the GEMM kernels */
58struct GEMMKernelInfo
59{
morgolockaba2f912020-05-05 16:28:19 +010060 GEMMKernelInfo() = default;
61 GEMMKernelInfo(
Jakub Sujak0d27b2e2023-08-24 14:01:20 +010062 unsigned int im,
63 unsigned int in,
64 unsigned int ik,
65 unsigned int idepth_output_gemm3d,
66 bool ireinterpret_input_as_3d,
67 bool ibroadcast_bias,
68 bool ifp_mixed_precision,
69 bool ihas_pad_y,
70 ActivationLayerInfo iactivation_info,
71 int inmult_transpose1xW_width,
72 int imult_interleave4x4_height,
73 GEMMLHSMatrixInfo ilhs_info,
74 GEMMRHSMatrixInfo irhs_info,
75 int32_t ina_offset,
76 int32_t inb_offset)
morgolockaba2f912020-05-05 16:28:19 +010077 : m(im), n(in), k(ik), depth_output_gemm3d(idepth_output_gemm3d), reinterpret_input_as_3d(ireinterpret_input_as_3d), broadcast_bias(ibroadcast_bias), fp_mixed_precision(ifp_mixed_precision),
Michele Di Giorgio655e8c62021-01-28 12:51:02 +000078 has_pad_y(ihas_pad_y), activation_info(iactivation_info), mult_transpose1xW_width(inmult_transpose1xW_width), mult_interleave4x4_height(imult_interleave4x4_height), lhs_info(ilhs_info),
Jakub Sujak0d27b2e2023-08-24 14:01:20 +010079 rhs_info(irhs_info), a_offset(ina_offset), b_offset(inb_offset)
morgolockaba2f912020-05-05 16:28:19 +010080 {
81 }
82
Jakub Sujak0d27b2e2023-08-24 14:01:20 +010083 unsigned int m{ 0 }; /**< Number of LHS rows*/
84 unsigned int n{ 0 }; /**< Number of RHS columns*/
85 unsigned int k{ 0 }; /**< Number of LHS columns or RHS rows */
86 unsigned int depth_output_gemm3d{ 0 }; /**< Depth of the output tensor in case is reinterpreted as 3D */
87 bool reinterpret_input_as_3d{ false }; /**< Flag used to reinterpret the input as 3D */
88 bool broadcast_bias{ false }; /**< Flag used to broadcast the bias addition */
89 bool fp_mixed_precision{ false }; /**< Flag used to indicate wider accumulators (32 bit instead of 16 for FP16). */
90 bool has_pad_y{ false }; /**< Flag used to indicate if the input/output tensors have internal pad on the y direction */
91 ActivationLayerInfo activation_info{}; /**< Activation function to perform after the matrix multiplication */
92 int mult_transpose1xW_width{ 1 }; /**< Multiplication factor for the width of the 1xW transposed block */
93 int mult_interleave4x4_height{ 1 }; /**< Multiplication factor for the height of the 4x4 interleaved block */
94 GEMMLHSMatrixInfo lhs_info{}; /**< LHS matrix information used to retrieve the number of rows processed by each thread */
95 GEMMRHSMatrixInfo rhs_info{}; /**< RHS matrix information used for reshaping the RHS matrix */
96 int32_t a_offset{ 0 }; /**< Offset to be added to each element of the matrix A */
97 int32_t b_offset{ 0 }; /**< Offset to be added to each element of the matrix B */
98 GEMMLowpOutputStageInfo output_stage{}; /**< GEMMLowp output stage information */
Gian Marco Iodice7026b302019-06-26 17:18:11 +010099};
Gian Marco Iodice9285adb2019-09-05 16:10:27 +0100100
/** Compute descriptor used by the depthwise convolution native kernel */
struct DWCComputeKernelInfo
{
    unsigned int n0{ 1 };                           /**< Number of columns processed by each thread */
    unsigned int m0{ 1 };                           /**< Number of rows processed by each thread */
    bool         export_input_to_cl_image{ false }; /**< Export input to cl_image */
    bool         export_weights_to_cl_image{ false }; /**< Export the weights to cl_image */
};
Sang-Hoon Park62eeb532019-10-29 13:13:19 +0000109
/** Compute descriptor used by the direct convolution kernel */
struct DirectConvComputeKernelInfo
{
    int32_t m0{ 1 };                            /**< Number of rows to be processed by the kernel */
    int32_t n0{ 1 };                            /**< Number of columns to be processed by the kernel */
    int32_t k0{ 1 };                            /**< Number of partial accumulations to be processed in a single iteration by the kernel */
    bool    export_weights_to_cl_image{ false }; /**< Flag to export the weights to cl_image */
    bool    export_output_to_cl_image{ false };  /**< Flag to export the output to cl_image */
    bool    export_input_to_cl_image{ false };   /**< Flag to export the input to cl_image */
};
120
/** Descriptor used by the softmax kernels */
struct SoftmaxKernelInfo
{
    float    beta{ 1.f };                            /**< A scaling factor for the exponent with default value 1.0 */
    bool     is_log{ false };                        /**< Flag used to perform Log Softmax operation */
    DataType input_data_type{ DataType::UNKNOWN };   /**< Input tensor data type */
    int32_t  axis{ 0 };                              /**< The dimension in which to apply softmax. */
};
Michele Di Giorgio45361932019-12-19 13:53:44 +0000129
/** Descriptor used by the direct convolution layer output stage kernels */
struct DirectConvolutionLayerOutputStageKernelInfo
{
    int32_t  result_fixedpoint_multiplier{ 0 };      /**< Result output stage multiplier used for quantizing */
    int32_t  result_shift{ 0 };                      /**< Result output stage shift used for quantizing */
    int32_t  result_offset_after_shift{ 0 };         /**< Result offset used for quantizing */
    DataType output_data_type{ DataType::UNKNOWN };  /**< Output tensor data type to use if the output is not initialized */
};
Georgios Pinitas55a687d2020-01-30 12:00:23 +0000138
/** Descriptor used by the instance normalization kernels */
struct InstanceNormalizationLayerKernelInfo
{
    /** Default constructor: gamma 1.0, beta 0.0, epsilon 1e-12, mixed precision enabled. */
    InstanceNormalizationLayerKernelInfo() = default;
    /** Constructor
     *
     * @param[in] gamma               The scale scalar value applied to the normalized tensor.
     * @param[in] beta                The offset scalar value applied to the normalized tensor
     * @param[in] epsilon             Lower bound value for the normalization.
     * @param[in] use_mixed_precision Use mixed precision in case of FP16 execution.
     */
    InstanceNormalizationLayerKernelInfo(float gamma, float beta, float epsilon, bool use_mixed_precision)
        : gamma(gamma), beta(beta), epsilon(epsilon), use_mixed_precision(use_mixed_precision)
    {
    }

    float gamma{ 1.f };                /**< The scale scalar value applied to the normalized tensor. Defaults to 1.0 */
    float beta{ 0.f };                 /**< The offset scalar value applied to the normalized tensor. Defaults to 0.0 */
    float epsilon{ 1e-12f };           /**< Lower bound value for the normalization. Defaults to 1e-12 */
    bool  use_mixed_precision{ true }; /**< Use mixed precision in case of FP16 execution. Defaults to true */
};
Michele Di Giorgioa602f032020-03-12 19:34:33 +0000163
/** Descriptor used by the GEMMLowp column/row reduction kernels */
struct GEMMLowpReductionKernelInfo
{
    /** Default constructor */
    GEMMLowpReductionKernelInfo() = default;
    /** Constructor
     *
     * @param[in] num_cols_rows    Number of matrix columns/rows.
     * @param[in] input_reshaped   True if the input tensor has been reshaped.
     * @param[in] scale_value      Scalar value to multiply each reduced column/row by.
     * @param[in] apply_scale      True if each column/row reduction has to be multiplied by a scalar value.
     */
    GEMMLowpReductionKernelInfo(int32_t num_cols_rows, bool input_reshaped, int32_t scale_value, bool apply_scale)
        : k(num_cols_rows), is_reshaped(input_reshaped), scalar(scale_value), mul_by_scalar(apply_scale)
    {
    }

    int32_t k{ 0 };                /**< Number of matrix columns/rows */
    bool    is_reshaped{ false };  /**< True if the input tensor has been reshaped */
    int32_t scalar{ 0 };           /**< Scalar value to multiply each reduced column/row by */
    bool    mul_by_scalar{ false }; /**< True if each column/row reduction has to be multiplied by a scalar value */
};
Sang-Hoon Parkc2617982020-05-20 22:13:47 +0100185
/** Descriptor used by the scale kernels */
struct ScaleKernelInfo
{
    /** Constructor
     *
     * @param[in] interpolation_policy  Interpolation type to use
     * @param[in] border_mode           Border mode policy
     * @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT and use_padding is set to false. Defaults to default @ref PixelValue
     * @param[in] sampling_policy       (Optional) Sampling policy used by the interpolation. Defaults to @ref SamplingPolicy::CENTER
     * @param[in] use_padding           (Optional) Is padding in use or not. Defaults to true.
     * @param[in] align_corners         (Optional) Align corners of input and output, only affecting bilinear policy with TOP_LEFT sampling policy. Defaults to false.
     * @param[in] data_layout           (Optional) Data layout used by the layer. Defaults to @ref DataLayout::UNKNOWN
     */
    ScaleKernelInfo(InterpolationPolicy interpolation_policy,
                    BorderMode          border_mode,
                    PixelValue          constant_border_value = PixelValue(),
                    SamplingPolicy      sampling_policy       = SamplingPolicy::CENTER,
                    bool                use_padding           = true,
                    bool                align_corners         = false,
                    DataLayout          data_layout           = DataLayout::UNKNOWN) noexcept
        : interpolation_policy{ interpolation_policy },
          border_mode{ border_mode },
          constant_border_value{ constant_border_value },
          sampling_policy{ sampling_policy },
          use_padding{ use_padding },
          align_corners{ align_corners },
          data_layout{ data_layout }
    {
    }

    InterpolationPolicy interpolation_policy;  /**< Interpolation type to use */
    BorderMode          border_mode;           /**< Border mode policy */
    PixelValue          constant_border_value; /**< Constant value to use for constant border mode policy */
    SamplingPolicy      sampling_policy;       /**< Sampling policy used by the interpolation. */
    bool                use_padding;           /**< Indication of using padding */
    bool                align_corners;         /**< Align corners of input and output */
    DataLayout          data_layout;           /**< Data layout to use */
};
Ramy Elgammal2b6ebfe2023-03-09 21:15:37 +0000223
/** Descriptor used by the matrix multiplication kernels */
struct MatMulKernelInfo
{
    MatMulKernelInfo() = default;
    /** Constructor
     *
     * @param[in] adjoint_lhs        True if the LHS matrix is transposed (adjoint).
     * @param[in] adjoint_rhs        True if the RHS matrix is transposed (adjoint).
     * @param[in] rows_per_item      (Optional) Number of output rows processed by each work-item. Defaults to 1.
     * @param[in] cols_per_item      (Optional) Number of output columns processed by each work-item. Defaults to 1.
     * @param[in] accs_per_item      (Optional) Number of inner accumulations. Defaults to 1.
     * @param[in] rhs_as_cl_image    (Optional) Whether the RHS tensor should be exported to cl_image. Defaults to false.
     */
    MatMulKernelInfo(bool adjoint_lhs, bool adjoint_rhs, int rows_per_item = 1, int cols_per_item = 1, int accs_per_item = 1, bool rhs_as_cl_image = false)
        : adj_lhs(adjoint_lhs), adj_rhs(adjoint_rhs), m0(rows_per_item), n0(cols_per_item), k0(accs_per_item), export_rhs_to_cl_image(rhs_as_cl_image)
    {
    }

    bool adj_lhs{ false };                /**< Get Adjoint LHS flag value */
    bool adj_rhs{ false };                /**< Get Adjoint RHS flag value */
    int  m0{ 1 };                         /**< Number of output rows processed by each work-item*/
    int  n0{ 1 };                         /**< Number of output columns processed by each work-item*/
    int  k0{ 1 };                         /**< Number of inner accumulations */
    bool export_rhs_to_cl_image{ false }; /**< Flag to know whether the RHS tensor should be exported to cl_image*/
};
} // namespace arm_compute
#endif // ACL_ARM_COMPUTE_CORE_KERNELDESCRIPTORS_H