/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H
#define ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H

#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/NEON/INEKernel.h"

namespace arm_compute
{
class ITensor;
/** NEON kernel to accumulate the biases, if provided, or downscale in case of quantized input.
 *
 * @note We assume the bias to be shared
 * @note For quantized computations (i.e. @p input of S32 type) the output data type for auto-initialization must be passed as part
 *       of the @ref DirectConvolutionLayerOutputStageKernelInfo.
 */
class NEDirectConvolutionLayerOutputStageKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEDirectConvolutionLayerOutputStageKernel";
    }
    /** Default constructor */
    NEDirectConvolutionLayerOutputStageKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEDirectConvolutionLayerOutputStageKernel(const NEDirectConvolutionLayerOutputStageKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEDirectConvolutionLayerOutputStageKernel &operator=(const NEDirectConvolutionLayerOutputStageKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEDirectConvolutionLayerOutputStageKernel(NEDirectConvolutionLayerOutputStageKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEDirectConvolutionLayerOutputStageKernel &operator=(NEDirectConvolutionLayerOutputStageKernel &&) = default;
    /** Default destructor */
    ~NEDirectConvolutionLayerOutputStageKernel() = default;
    /** Set the accumulate buffer and the biases of the kernel.
     *
     * @param[in, out] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
     *                        Data type supported: F16/F32/S32
     * @param[in]      bias   (Optional) The shared bias tensor to add. It must be a 1D tensor. Data type supported: Same as @p input
     * @param[out]     output (Optional) If the output tensor is specified, the accumulation is done out-of-place. (Defaults to nullptr)
     *                        Note that in-place computation is only supported for F16/F32. For S32 this must not be nullptr.
     *                        Data type supported: F16/F32, or QASYMM8/QASYMM8_SIGNED if @p input is S32
     * @param[in]      info   (Optional) DirectConvolutionLayerOutputStageKernel descriptor metadata
     */
    void configure(ITensor *input, const ITensor *bias = nullptr, ITensor *output = nullptr,
                   const DirectConvolutionLayerOutputStageKernelInfo &info = DirectConvolutionLayerOutputStageKernelInfo());
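    // A minimal configuration sketch for the quantized path, in which the S32
    // accumulator of a direct convolution is downscaled out-of-place to QASYMM8.
    // The tensor names (conv_accum_s32, bias_s32, dst_q8) and the requantization
    // values are hypothetical, and the descriptor field names are assumed from the
    // usual layout of DirectConvolutionLayerOutputStageKernelInfo:
    //
    //   DirectConvolutionLayerOutputStageKernelInfo osk_info{};
    //   osk_info.result_fixedpoint_multiplier = 1374389535;         // assumed requantization multiplier
    //   osk_info.result_shift                 = 5;                  // assumed requantization shift
    //   osk_info.result_offset_after_shift    = 10;                 // assumed output zero point
    //   osk_info.output_data_type             = DataType::QASYMM8;  // needed to auto-initialize @p output
    //
    //   NEDirectConvolutionLayerOutputStageKernel output_stage;
    //   output_stage.configure(&conv_accum_s32, &bias_s32, &dst_q8, osk_info);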
    /** Static function to check if the given info will lead to a valid configuration of @ref NEDirectConvolutionLayerOutputStageKernel
     *
     * @param[in] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
     *                   Data type supported: F16/F32/S32
     * @param[in] bias   (Optional) The shared bias tensor to add. It must be a 1D tensor. Data type supported: Same as @p input
     * @param[in] output (Optional) If the output tensor is specified, the accumulation is done out-of-place. (Defaults to nullptr)
     *                   Note that in-place computation is only supported for F16/F32. For S32 this must not be nullptr.
     *                   Data type supported: F16/F32, or QASYMM8/QASYMM8_SIGNED if @p input is S32
     * @param[in] info   (Optional) DirectConvolutionLayerOutputStageKernel descriptor metadata
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr,
                           const DirectConvolutionLayerOutputStageKernelInfo &info = DirectConvolutionLayerOutputStageKernelInfo());

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    using OutputStageKernel = void(ITensor *input, const ITensor *bias, const Window &window, ITensor *output,
                                   int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, bool has_bias);

private:
    OutputStageKernel *_func;
    ITensor           *_input;
    const ITensor     *_bias;
    ITensor           *_output;
    int                _result_fixedpoint_multiplier;
    int                _result_shift;
    int                _result_offset_after_shift;
};
} // namespace arm_compute
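//
// A minimal end-to-end usage sketch (an assumption-level example, not taken from the
// library's tests). The float path accumulates a shared F32 bias in-place: validate
// the tensor infos first, then configure the kernel and execute it over its maximal
// window. Tensor names are hypothetical, and a single-threaded run via the kernel's
// own window() and a default ThreadInfo is shown in place of the usual scheduler
// dispatch.
//
//   using namespace arm_compute;
//
//   NEDirectConvolutionLayerOutputStageKernel output_stage;
//   Status status = NEDirectConvolutionLayerOutputStageKernel::validate(conv_out.info(), bias.info());
//   if(status.error_code() == ErrorCode::OK)
//   {
//       output_stage.configure(&conv_out, &bias); // in-place accumulation of the bias into conv_out
//       output_stage.run(output_stage.window(), ThreadInfo{});
//   }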
#endif /*ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H */