blob: f64739ae3255690ad705433c1721c3541de93a7a [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Sang-Hoon Park62eeb532019-10-29 13:13:19 +00002 * Copyright (c) 2017-2019 ARM Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
25#define ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
steniu010d523cc2017-07-13 14:24:23 +010027#include "arm_compute/core/CL/ICLSimple3DKernel.h"
Sang-Hoon Park62eeb532019-10-29 13:13:19 +000028#include "arm_compute/core/KernelDescriptors.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010029
Chunosovd6afedc2017-11-06 22:09:45 +070030#include <tuple>
31
Anthony Barbier6ff3b192017-09-04 18:44:23 +010032namespace arm_compute
33{
34class ICLTensor;
35
/** Interface for identifying the max value of 1D Logits.
 *
 * First step of the softmax computation: finds the per-row maximum, which is
 * later subtracted from each logit for numerical stability.
 */
class CLLogits1DMaxKernel : public ICLSimple3DKernel
{
public:
    /** Set the input and output tensors.
     *
     * @param[in]  input  Source tensor. Data types supported: QASYMM8/F16/F32
     * @param[out] output Destination tensor. Data types supported: same as @p input
     */
    void configure(const ICLTensor *input, ICLTensor *output);
    /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxKernel
     *
     * @param[in] input  Source tensor. Data types supported: QASYMM8/F16/F32
     * @param[in] output Destination tensor. Data types supported: same as @p input
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
};
55
/** Interface for shifting, exponentiating and summing the logits.
 *
 * Second step of the softmax computation: computes exp(input - max) per element
 * and accumulates the per-row sum used by the final normalization step.
 */
class CLLogits1DShiftExpSumKernel : public ICLKernel
{
public:
    /** Default constructor */
    CLLogits1DShiftExpSumKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DShiftExpSumKernel(const CLLogits1DShiftExpSumKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DShiftExpSumKernel &operator=(const CLLogits1DShiftExpSumKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLLogits1DShiftExpSumKernel(CLLogits1DShiftExpSumKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLLogits1DShiftExpSumKernel &operator=(CLLogits1DShiftExpSumKernel &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input  Source tensor. Data types supported: QASYMM8/F16/F32
     * @param[in]  max    Max values tensor. Data types supported: same as @p input
     * @param[out] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
     * @param[out] sum    Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
     * @param[in]  beta   (Optional) A scaling factor for the exponent. Defaults to 1.0
     */
    void configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
    /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DShiftExpSumKernel
     *
     * @param[in] input  Source tensor. Data types supported: QASYMM8/F16/F32
     * @param[in] max    Max values tensor. Data types supported: same as @p input
     * @param[in] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
     * @param[in] sum    Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    // Non-owning pointers to the tensors set in configure(); the caller keeps ownership.
    const ICLTensor *_input;
    const ICLTensor *_max;
    ICLTensor       *_output;
    ICLTensor       *_sum;
};
99
/** Interface for max, shifting, exponentiating and summing the logits.
 *
 * Fused kernel combining the max-finding and shift/exp/sum steps of softmax in a
 * single pass; depending on the row width it runs either a serial or a parallel
 * reduction (see @ref is_parallel_reduction).
 */
class CLLogits1DMaxShiftExpSumKernel : public ICLKernel
{
public:
    /** Info for whether a parallel reduction will be run and the vector size of the execution. */
    using ParallelReductionInfo = std::tuple<bool, unsigned int>;

public:
    /** Default constructor */
    CLLogits1DMaxShiftExpSumKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DMaxShiftExpSumKernel(const CLLogits1DMaxShiftExpSumKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DMaxShiftExpSumKernel &operator=(const CLLogits1DMaxShiftExpSumKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLLogits1DMaxShiftExpSumKernel(CLLogits1DMaxShiftExpSumKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLLogits1DMaxShiftExpSumKernel &operator=(CLLogits1DMaxShiftExpSumKernel &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]     input  Source tensor. Data types supported: F16/F32
     * @param[in,out] max    Max values tensor. Data types supported: same as @p input
     * @param[out]    output Destination tensor. Data types supported: same as @p input
     * @param[out]    sum    Sum of 1D logits tensor. Data types supported: same as @p input
     * @param[in]     info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxShiftExpSumKernel
     *
     * @param[in] input  Source tensor. Data types supported: F16/F32
     * @param[in] max    Max values tensor. Data types supported: same as @p input
     * @param[in] output Destination tensor. Data types supported: same as @p input
     * @param[in] sum    Sum of 1D logits tensor. Data types supported: same as @p input
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
    /** Checks if the given size is eligible for parallel reduction
     *
     * @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
     * @note Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
     *
     * @param[in] size Size to check
     *
     * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
     *         while the second element is the vector size of the execution.
     */
    static ParallelReductionInfo is_parallel_reduction(size_t size);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    // Non-owning pointers to the tensors set in configure(); the caller keeps ownership.
    const ICLTensor *_input;
    ICLTensor       *_max;
    ICLTensor       *_output;
    ICLTensor       *_sum;

private:
    // Tuning constants used by is_parallel_reduction() to pick the reduction
    // strategy; defined in the corresponding implementation file.
    static const unsigned int _grid_size;
    static const unsigned int _serial_vector_size;
    static const unsigned int _parallel_vector_size;
};
/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
class CLLogits1DNormKernel : public ICLKernel
{
public:
    /** Default constructor */
    CLLogits1DNormKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DNormKernel(const CLLogits1DNormKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DNormKernel &operator=(const CLLogits1DNormKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLLogits1DNormKernel(CLLogits1DNormKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLLogits1DNormKernel &operator=(CLLogits1DNormKernel &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input  Source tensor. Data types supported: S32/F16/F32
     * @param[in]  sum    Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
     * @param[out] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
     * @param[in]  info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DNormKernel
     *
     * @param[in] input  Source tensor. Data types supported: S32/F16/F32
     * @param[in] sum    Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
     * @param[in] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
     * @param[in] info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    // Non-owning pointers to the tensors set in configure(); the caller keeps ownership.
    const ICLTensor *_input;
    const ICLTensor *_sum;
    ICLTensor       *_output;
};
Gian Marco Iodicef670a0a2017-09-18 12:20:45 +0100204} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000205#endif /*ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H */