/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NESOFTMAXLAYER_H__
#define __ARM_COMPUTE_NESOFTMAXLAYER_H__

#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
#include "arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/Tensor.h"

namespace arm_compute
{
class ITensor;

/** Basic function to compute a SoftmaxLayer.
 *
 * Softmax is calculated by:
 * @f[ out = \frac{e^{x - max(x)}}{\sum{e^{x - max(x)}}} @f]
 *
 * This function runs the following kernels:
 * -# @ref NEFillBorderKernel
 * -# @ref NELogits1DMaxKernel
 * -# @ref NELogits1DSoftmaxKernel
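 *
 * A minimal usage sketch (the shapes and data type are illustrative; the caller
 * owns tensor allocation and filling):
 * @code
 * // Source holds 32 rows of 128 logits each; destination has the same shape.
 * Tensor src;
 * Tensor dst;
 * src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
 * dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
 *
 * NESoftmaxLayer softmax;
 * softmax.configure(&src, &dst); // beta = 1.0f, axis = 1 by default
 *
 * src.allocator()->allocate();
 * dst.allocator()->allocate();
 * // ... fill src with logits, then:
 * softmax.run();
 * @endcode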
 */
class NESoftmaxLayer : public IFunction
{
public:
    /** Constructor */
    NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Set the input and output tensors.
     *
     * @param[in,out] input  Source tensor. Data types supported: QASYMM8/F16/F32. If the width is not a
     *                       multiple of the internal processing block size, @ref NEFillBorderKernel replicates the
     *                       last value of each row to the nearest multiple.
     * @param[out]    output Destination tensor. Data types supported: same as @p input.
     * @param[in]     beta   (Optional) A scaling factor for the exponent.
     * @param[in]     axis   (Optional) Reduction axis. It squashes the first @p axis
     *                       dimensions together. For instance, given a [4x4x4x4] image,
     *                       when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
     *
     * @note The value of @p axis must always be 1 for NEON
     */
    void configure(ITensor *input, ITensor *output, float beta = 1.0f, size_t axis = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref NESoftmaxLayer
     *
     * @param[in] input  Source tensor info. Data types supported: QASYMM8/F16/F32.
     * @param[in] output Destination tensor info. Data types supported: same as @p input
     * @param[in] beta   (Optional) A scaling factor for the exponent.
     * @param[in] axis   (Optional) Reduction axis. It squashes the first @p axis
     *                   dimensions together. For instance, given a [4x4x4x4] image,
     *                   when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
     *
     * @note The value of @p axis must always be 1 for NEON
     *
     * @return a status
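     *
     * A minimal sketch of pre-validating a configuration (the tensor infos are
     * illustrative; error handling is up to the caller):
     * @code
     * TensorInfo src_info(TensorShape(128U, 32U), 1, DataType::F32);
     * TensorInfo dst_info(TensorShape(128U, 32U), 1, DataType::F32);
     *
     * Status s = NESoftmaxLayer::validate(&src_info, &dst_info);
     * // s.error_code() == ErrorCode::OK when the configuration is supported
     * @endcode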
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, size_t axis = 1);

    // Inherited methods overridden:
    void run() override;

private:
    MemoryGroup             _memory_group;
    NELogits1DMaxKernel     _max_kernel;
    NELogits1DSoftmaxKernel _softmax_kernel;
    NEFillBorderKernel      _fill_border_kernel;
    Tensor                  _max;
    Tensor                  _tmp;
};
}
#endif /* __ARM_COMPUTE_NESOFTMAXLAYER_H__ */