/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NESOFTMAXLAYER_H__
#define __ARM_COMPUTE_NESOFTMAXLAYER_H__

#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
#include "arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/Tensor.h"

namespace arm_compute
{
class ITensor;

/** Basic function to compute a SoftmaxLayer.
 *
 * Softmax is calculated by:
 * @f[ out = \frac{e^{x - max(x)}}{\sum{e^{x - max(x)}}} @f]
 *
 * This function runs the following kernels:
 * -# @ref NEFillBorderKernel
 * -# @ref NELogits1DMaxKernel
 * -# @ref NELogits1DSoftmaxKernel
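 *
 * A minimal usage sketch (illustrative only, not part of the original documentation;
 * the tensor shapes and the init/allocate calls below follow the library's usual
 * @ref Tensor workflow and are assumptions here):
 * @code
 * // Assumed 2D F32 tensors: 128 logits per row, 32 rows.
 * Tensor src;
 * Tensor dst;
 * src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
 * dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
 *
 * NESoftmaxLayer softmax;
 * softmax.configure(&src, &dst); // beta = 1.0f and axis = 1 by default
 *
 * src.allocator()->allocate();
 * dst.allocator()->allocate();
 * // ... fill src with logits ...
 * softmax.run(); // dst now holds the softmax of each row of src
 * @endcode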
 */
class NESoftmaxLayer : public IFunction
{
public:
    /** Constructor */
    NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NESoftmaxLayer(const NESoftmaxLayer &) = delete;
    /** Default move constructor */
    NESoftmaxLayer(NESoftmaxLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NESoftmaxLayer &operator=(const NESoftmaxLayer &) = delete;
    /** Default move assignment operator */
    NESoftmaxLayer &operator=(NESoftmaxLayer &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in,out] input  Source tensor. Data types supported: QASYMM8/F16/F32. If the width is not a
     *                       multiple of the internal processing block size, @ref NEFillBorderKernel replicates the
     *                       last value of each row to the nearest multiple.
     * @param[out]    output Destination tensor. Data types supported: same as @p input.
     * @param[in]     beta   (Optional) A scaling factor for the exponent.
     * @param[in]     axis   (Optional) Reduction axis. Defaults to 1. Must be in range [1, input_num_dimensions).
     *                       It has the purpose of squashing the first @p axis dimensions together. For instance, given a [4x4x4x4] image,
     *                       when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
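     *
     * Example (an illustrative sketch, not taken from the original docs; @c softmax,
     * @c src4d and @c dst4d are placeholder objects and the [4x4x4x4] shape simply
     * mirrors the description above):
     * @code
     * // With a [4x4x4x4] input and axis = 2, the first two dimensions are flattened
     * // internally and the softmax is computed independently over each [4x4] plane.
     * NESoftmaxLayer softmax;
     * softmax.configure(&src4d, &dst4d, 1.0f, 2);
     * @endcode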
     */
    void configure(ITensor *input, ITensor *output, float beta = 1.0f, size_t axis = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref NESoftmaxLayer
     *
     * @param[in] input  Source tensor info. Data types supported: QASYMM8/F16/F32.
     * @param[in] output Destination tensor info. Data types supported: same as @p input
     * @param[in] beta   (Optional) A scaling factor for the exponent.
     * @param[in] axis   (Optional) Reduction axis. Defaults to 1. Must be in range [1, input_num_dimensions).
     *                   It has the purpose of squashing the first @p axis dimensions together. For instance, given a [4x4x4x4] image,
     *                   when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
     *
     * @return a status
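     *
     * Example (an illustrative sketch; @c src_info, @c dst_info, @c src, @c dst and
     * @c softmax are placeholder objects, not part of this header):
     * @code
     * // Check the tensor metadata before committing to a configuration.
     * const Status st = NESoftmaxLayer::validate(&src_info, &dst_info);
     * if(st.error_code() == ErrorCode::OK)
     * {
     *     softmax.configure(&src, &dst);
     * }
     * @endcode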
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, size_t axis = 1);

    // Inherited methods overridden:
    void run() override;

private:
    /** Utility method to configure the kernels needed to flatten the input
     * tensor.
     *
     * @note This function changes the internal state of this class. In particular,
     * it initializes the kernel pointed to by @p _flat_or_reshape_kernel_ptr and the
     * tensors @p _input_flattened and @p _output_flattened.
     *
     * @param[in] input  Original source tensor.
     * @param[in] output Original destination tensor.
     * @param[in] axis   Reduction axis. Must be in range [1, input_num_dimensions).
     *                   It has the purpose of squashing the first @p axis dimensions together. For instance, given a [4x4x4x4] image,
     *                   when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
     */
    void configure_reshape_input_kernel(const ITensor *input, const ITensor *output, size_t axis);

    MemoryGroup                _memory_group;
    NELogits1DMaxKernel        _max_kernel;
    NELogits1DSoftmaxKernel    _softmax_kernel;
    std::unique_ptr<INEKernel> _flat_or_reshape_kernel_ptr;
    NEFillBorderKernel         _fill_border_kernel;
    NEReshapeLayerKernel       _reshape_kernel;
    Tensor                     _max;
    Tensor                     _tmp;
    Tensor                     _input_flattened;
    Tensor                     _output_flattened;
    bool                       _needs_flattening;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NESOFTMAXLAYER_H__ */