/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
#define ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H

#include "arm_compute/core/CL/ICLSimple3DKernel.h"
#include "arm_compute/core/KernelDescriptors.h"

#include <tuple>
31
Anthony Barbier6ff3b192017-09-04 18:44:23 +010032namespace arm_compute
33{
34class ICLTensor;
35
36/** Interface for the identifying the max value of 1D Logits */
steniu010d523cc2017-07-13 14:24:23 +010037class CLLogits1DMaxKernel : public ICLSimple3DKernel
Anthony Barbier6ff3b192017-09-04 18:44:23 +010038{
39public:
40 /** Set the input and output tensors.
41 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +010042 * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
Georgios Pinitase5f8fd62017-06-23 18:03:44 +010043 * @param[out] output Destination tensor. Data types supported: same as @p input
Anthony Barbier6ff3b192017-09-04 18:44:23 +010044 */
45 void configure(const ICLTensor *input, ICLTensor *output);
Manuel Bottini4c6bd512020-04-08 10:15:51 +010046 /** Set the input and output tensors.
47 *
48 * @param[in] compile_context The compile context to be used.
49 * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
50 * @param[out] output Destination tensor. Data types supported: same as @p input
51 */
52 void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output);
Georgios Pinitas30902ed2017-11-14 15:32:57 +000053 /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxKernel
54 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +010055 * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
Georgios Pinitas30902ed2017-11-14 15:32:57 +000056 * @param[in] output Destination tensor. Data types supported: same as @p input
57 *
Georgios Pinitas631c41a2017-12-06 11:53:03 +000058 * @return a status
Georgios Pinitas30902ed2017-11-14 15:32:57 +000059 */
Georgios Pinitas631c41a2017-12-06 11:53:03 +000060 static Status validate(const ITensorInfo *input, const ITensorInfo *output);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010061};
62
Chunosovd6afedc2017-11-06 22:09:45 +070063/** Interface for shifting, exponentiating and summing the logits */
Anthony Barbier6ff3b192017-09-04 18:44:23 +010064class CLLogits1DShiftExpSumKernel : public ICLKernel
65{
66public:
67 /** Default constructor */
68 CLLogits1DShiftExpSumKernel();
69 /** Prevent instances of this class from being copied (As this class contains pointers) */
70 CLLogits1DShiftExpSumKernel(const CLLogits1DShiftExpSumKernel &) = delete;
71 /** Prevent instances of this class from being copied (As this class contains pointers) */
72 CLLogits1DShiftExpSumKernel &operator=(const CLLogits1DShiftExpSumKernel &) = delete;
73 /** Allow instances of this class to be moved */
74 CLLogits1DShiftExpSumKernel(CLLogits1DShiftExpSumKernel &&) = default;
75 /** Allow instances of this class to be moved */
76 CLLogits1DShiftExpSumKernel &operator=(CLLogits1DShiftExpSumKernel &&) = default;
77 /** Set the input and output tensors.
78 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +010079 * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
Georgios Pinitase5f8fd62017-06-23 18:03:44 +010080 * @param[in] max Max values tensor. Data types supported: same as @p input
Chunosovf450caa2017-11-08 16:09:35 +070081 * @param[out] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
82 * @param[out] sum Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
83 * @param[in] beta (Optional) A scaling factor for the exponent. Defaults to 1.0
Anthony Barbier6ff3b192017-09-04 18:44:23 +010084 */
Pablo Palmier48a60f92017-10-18 11:03:08 +010085 void configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
Manuel Bottini4c6bd512020-04-08 10:15:51 +010086 /** Set the input and output tensors.
87 *
88 * @param[in] compile_context The compile context to be used.
89 * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
90 * @param[in] max Max values tensor. Data types supported: same as @p input
91 * @param[out] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
92 * @param[out] sum Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
93 * @param[in] beta (Optional) A scaling factor for the exponent. Defaults to 1.0
94 */
95 void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
Georgios Pinitas30902ed2017-11-14 15:32:57 +000096 /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DShiftExpSumKernel
97 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +010098 * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
Georgios Pinitas30902ed2017-11-14 15:32:57 +000099 * @param[in] max Max values tensor. Data types supported: same as @p input
100 * @param[in] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
101 * @param[in] sum Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
102 *
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000103 * @return a status
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000104 */
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000105 static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100106
107 // Inherited methods overridden:
108 void run(const Window &window, cl::CommandQueue &queue) override;
109
110private:
111 const ICLTensor *_input;
112 const ICLTensor *_max;
113 ICLTensor *_output;
114 ICLTensor *_sum;
115};
116
Chunosovd6afedc2017-11-06 22:09:45 +0700117/** Interface for max, shifting, exponentiating and summing the logits */
118class CLLogits1DMaxShiftExpSumKernel : public ICLKernel
119{
120public:
Alex Gildayc357c472018-03-21 13:54:09 +0000121 /** Info for whether a parallel reduction will be run and the vector size of the execution. */
Chunosovd6afedc2017-11-06 22:09:45 +0700122 using ParallelReductionInfo = std::tuple<bool, unsigned int>;
123
124public:
125 /** Default constructor */
126 CLLogits1DMaxShiftExpSumKernel();
127 /** Prevent instances of this class from being copied (As this class contains pointers) */
128 CLLogits1DMaxShiftExpSumKernel(const CLLogits1DMaxShiftExpSumKernel &) = delete;
129 /** Prevent instances of this class from being copied (As this class contains pointers) */
130 CLLogits1DMaxShiftExpSumKernel &operator=(const CLLogits1DMaxShiftExpSumKernel &) = delete;
131 /** Allow instances of this class to be moved */
132 CLLogits1DMaxShiftExpSumKernel(CLLogits1DMaxShiftExpSumKernel &&) = default;
133 /** Allow instances of this class to be moved */
134 CLLogits1DMaxShiftExpSumKernel &operator=(CLLogits1DMaxShiftExpSumKernel &&) = default;
135 /** Set the input and output tensors.
136 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100137 * @param[in] input Source tensor. Data types supported: F16/F32
Chunosovd6afedc2017-11-06 22:09:45 +0700138 * @param[in,out] max Max values tensor. Data types supported: same as @p input
139 * @param[out] output Destination tensor. Data types supported: same as @p input
140 * @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p input
Sang-Hoon Park62eeb532019-10-29 13:13:19 +0000141 * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
Chunosovd6afedc2017-11-06 22:09:45 +0700142 */
Sang-Hoon Park62eeb532019-10-29 13:13:19 +0000143 void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
Manuel Bottini4c6bd512020-04-08 10:15:51 +0100144 /** Set the input and output tensors.
145 *
146 * @param[in] compile_context The compile context to be used.
147 * @param[in] input Source tensor. Data types supported: F16/F32
148 * @param[in,out] max Max values tensor. Data types supported: same as @p input
149 * @param[out] output Destination tensor. Data types supported: same as @p input
150 * @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p input
151 * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
152 */
153 void configure(CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000154 /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxShiftExpSumKernel
155 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100156 * @param[in] input Source tensor. Data types supported: F16/F32
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000157 * @param[in] max Max values tensor. Data types supported: same as @p input
158 * @param[in] output Destination tensor. Data types supported: same as @p input
159 * @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p input
160 *
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000161 * @return a status
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000162 */
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000163 static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
Chunosovd6afedc2017-11-06 22:09:45 +0700164 /** Checks if the given size is eligible for parallel reduction
165 *
166 * @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
167 * @note Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
168 *
169 * @param[in] size Size to check
170 *
Alex Gildayc357c472018-03-21 13:54:09 +0000171 * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
172 * while the second element is the vector size of the execution.
Chunosovd6afedc2017-11-06 22:09:45 +0700173 */
174 static ParallelReductionInfo is_parallel_reduction(size_t size);
175
176 // Inherited methods overridden:
177 void run(const Window &window, cl::CommandQueue &queue) override;
178
179private:
180 const ICLTensor *_input;
181 ICLTensor *_max;
182 ICLTensor *_output;
183 ICLTensor *_sum;
184
185private:
186 static const unsigned int _grid_size;
187 static const unsigned int _serial_vector_size;
188 static const unsigned int _parallel_vector_size;
189};
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100190/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
191class CLLogits1DNormKernel : public ICLKernel
192{
193public:
194 /** Default constructor */
195 CLLogits1DNormKernel();
196 /** Prevent instances of this class from being copied (As this class contains pointers) */
197 CLLogits1DNormKernel(const CLLogits1DNormKernel &) = delete;
198 /** Prevent instances of this class from being copied (As this class contains pointers) */
199 CLLogits1DNormKernel &operator=(const CLLogits1DNormKernel &) = delete;
200 /** Allow instances of this class to be moved */
201 CLLogits1DNormKernel(CLLogits1DNormKernel &&) = default;
202 /** Allow instances of this class to be moved */
203 CLLogits1DNormKernel &operator=(CLLogits1DNormKernel &&) = default;
204 /** Set the input and output tensors.
205 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100206 * @param[in] input Source tensor. Data types supported: S32/F16/F32
Georgios Pinitase5f8fd62017-06-23 18:03:44 +0100207 * @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
Chunosovf450caa2017-11-08 16:09:35 +0700208 * @param[out] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
Sang-Hoon Park62eeb532019-10-29 13:13:19 +0000209 * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100210 */
Sang-Hoon Park62eeb532019-10-29 13:13:19 +0000211 void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
Manuel Bottini4c6bd512020-04-08 10:15:51 +0100212 /** Set the input and output tensors.
213 *
214 * @param[in] compile_context The compile context to be used.
215 * @param[in] input Source tensor. Data types supported: S32/F16/F32
216 * @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
217 * @param[out] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
218 * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
219 */
220 void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000221 /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DNormKernel
222 *
Vidhya Sudhan Loganathan7485d5a2018-07-04 09:34:00 +0100223 * @param[in] input Source tensor. Data types supported: S32/F16/F32
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000224 * @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
225 * @param[in] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
Sang-Hoon Park0779fec2019-11-13 17:08:12 +0000226 * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000227 *
Georgios Pinitas631c41a2017-12-06 11:53:03 +0000228 * @return a status
Georgios Pinitas30902ed2017-11-14 15:32:57 +0000229 */
Sang-Hoon Park0779fec2019-11-13 17:08:12 +0000230 static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100231
232 // Inherited methods overridden:
233 void run(const Window &window, cl::CommandQueue &queue) override;
234
235private:
236 const ICLTensor *_input;
237 const ICLTensor *_sum;
238 ICLTensor *_output;
239};
Gian Marco Iodicef670a0a2017-09-18 12:20:45 +0100240} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000241#endif /*ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H */