blob: f8c1019d532353925453916e243d12ab317f6eb3 [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2017-2020 Arm Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
25#define ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
steniu010d523cc2017-07-13 14:24:23 +010027#include "arm_compute/core/CL/ICLSimple3DKernel.h"
Sang-Hoon Park62eeb532019-10-29 13:13:19 +000028#include "arm_compute/core/KernelDescriptors.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010029
30namespace arm_compute
31{
32class ICLTensor;
33
/** Interface for max, shifting, exponentiating and summing the logits.
 *
 * Fuses the first three stages of softmax into one pass: finds the row max,
 * subtracts it (shift, for numerical stability), exponentiates, and sums the
 * exponentials. The reduction strategy (serial vs parallel) is chosen from the
 * input width; see @ref is_parallel_reduction.
 */
class CLLogits1DMaxShiftExpSumKernel : public ICLKernel
{
public:
    /** Info for whether a parallel reduction will be run and the vector size of the execution. */
    using ParallelReductionInfo = std::tuple<bool, unsigned int>;

public:
    /** Default constructor */
    CLLogits1DMaxShiftExpSumKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DMaxShiftExpSumKernel(const CLLogits1DMaxShiftExpSumKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DMaxShiftExpSumKernel &operator=(const CLLogits1DMaxShiftExpSumKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLLogits1DMaxShiftExpSumKernel(CLLogits1DMaxShiftExpSumKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLLogits1DMaxShiftExpSumKernel &operator=(CLLogits1DMaxShiftExpSumKernel &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]     input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
     * @param[in,out] max    Max values tensor. Data types supported: same as @p input
     * @param[out]    output Destination tensor. Data types supported: same as @p input
     * @param[out]    sum    Sum of 1D logits tensor. Data types supported: same as @p input
     * @param[in]     info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
    /** Set the input and output tensors using the given compile context.
     *
     * @param[in]     compile_context The compile context to be used.
     * @param[in]     input           Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
     * @param[in,out] max             Max values tensor. Data types supported: same as @p input
     * @param[out]    output          Destination tensor. Data types supported: same as @p input
     * @param[out]    sum             Sum of 1D logits tensor. Data types supported: same as @p input
     * @param[in]     info            Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxShiftExpSumKernel
     *
     * @param[in] input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
     * @param[in] max    Max values tensor. Data types supported: same as @p input
     * @param[in] output Destination tensor. Data types supported: same as @p input
     * @param[in] sum    Sum of 1D logits tensor. Data types supported: same as @p input
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum);
    /** Checks if the given size is eligible for parallel reduction
     *
     * @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
     * @note Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
     *
     * @param[in] size Size to check
     *
     * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
     *         while the second element is the vector size of the execution.
     */
    static ParallelReductionInfo is_parallel_reduction(size_t size);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;  // Source logits (not owned)
    ICLTensor       *_max;    // Per-row maxima (not owned)
    ICLTensor       *_output; // Shifted exponentials (not owned)
    ICLTensor       *_sum;    // Per-row sum of exponentials (not owned)

private:
    // Tuning constants used by is_parallel_reduction(); values defined in the kernel source file.
    static const unsigned int _grid_size;
    static const unsigned int _serial_vector_size;
    static const unsigned int _parallel_vector_size;
};
/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
class CLLogits1DNormKernel : public ICLKernel
{
public:
    /** Default constructor */
    CLLogits1DNormKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DNormKernel(const CLLogits1DNormKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLLogits1DNormKernel &operator=(const CLLogits1DNormKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLLogits1DNormKernel(CLLogits1DNormKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLLogits1DNormKernel &operator=(CLLogits1DNormKernel &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input  Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
     * @param[in]  sum    Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
     * @param[out] output Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p input, or same as @p input
     * @param[in]  info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
    /** Set the input and output tensors using the given compile context.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
     * @param[in]  sum             Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
     * @param[out] output          Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p input, or same as @p input
     * @param[in]  info            Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DNormKernel
     *
     * @param[in] input  Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
     * @param[in] sum    Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
     * @param[in] output Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p input, or same as @p input
     * @param[in] info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;  // Exponentiated logits from the max/shift/exp/sum stage (not owned)
    const ICLTensor *_sum;    // Per-row sums used as the normalization divisor (not owned)
    ICLTensor       *_output; // Normalized softmax result (not owned)
};
Gian Marco Iodicef670a0a2017-09-18 12:20:45 +0100157} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000158#endif /*ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H */