/*
 * Copyright (c) 2017-2021, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEDECONVOLUTIONLAYER_H
#define ARM_COMPUTE_NEDECONVOLUTIONLAYER_H

#include "arm_compute/runtime/CPP/functions/CPPUpsample.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEReverse.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
/** Function to run the deconvolution layer.
 *
 * Deconvolution Layer is the backward pass of Convolution Layer. First we transform the input depending on the stride and pad info and then perform a 1x1
 * convolution pass. Input stride defines how many zeros we should put between each element of the input, pad is the amount of padding and finally a is a user
 * specified value where a < stride - 1 that increases the padding top and right of the input image.
 *
 * The relation between input and output is as follows:
 * \f[
 * width\_output = (width\_input - 1) \cdot stride\_x - 2 \cdot padding\_x + kernel\_x
 * \f]
 * \f[
 * height\_output = (height\_input - 1) \cdot stride\_y - 2 \cdot padding\_y + kernel\_y
 * \f]
 *
 * where
 * width is the size of the first input dimension.
 * height is the size of the second input dimension.
 * width_output is the size of the first output dimension.
 * height_output is the size of the second output dimension.
 * kernel_x and kernel_y are the convolution sizes in x and y.
 * stride_x and stride_y are the input strides of the first and second dimensions.
 *
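 * For example, as an illustrative sanity check of the width formula: with width_input = 4,
 * stride_x = 2, padding_x = 1 and kernel_x = 3, we get
 * width_output = (4 - 1) * 2 - 2 * 1 + 3 = 7.
 *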
 * The weights used by Deconvolution are supposed to be the same as the ones used for Convolution. Therefore, it will be necessary to use the weights in the
 * reverse order to perform an actual convolution. This is achieved by using @ref NEReverse.
 *
 * This function calls the following kernels/functions:
 *
 * -# @ref CPPUpsample
 * -# @ref NEConvolutionLayer
 * -# @ref NEReverse
 *
 */
class NEDeconvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEDeconvolutionLayer(const NEDeconvolutionLayer &) = delete;
    /** Default move constructor */
    NEDeconvolutionLayer(NEDeconvolutionLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEDeconvolutionLayer &operator=(const NEDeconvolutionLayer &) = delete;
    /** Default move assignment operator */
    NEDeconvolutionLayer &operator=(NEDeconvolutionLayer &&) = default;
    /** Default destructor */
    ~NEDeconvolutionLayer() = default;

    /** Set the input, weights, biases and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0           |src1               |src2   |dst            |
     * |:--------------|:------------------|:------|:--------------|
     * |F16            |F16                |F16    |F16            |
     * |F32            |F32                |F32    |F32            |
     * |QASYMM8        |QASYMM8            |S32    |QASYMM8        |
     * |QASYMM8        |QSYMM8_PER_CHANNEL |S32    |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32    |QASYMM8_SIGNED |
     * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32    |QASYMM8_SIGNED |
     *
     * @param[in,out] input            Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs.
     *                                 Data types supported: F32/F16/QASYMM8/QASYMM8_SIGNED.
     * @param[in]     weights          The 4d weights with dimensions [width, height, IFM, OFM].
     *                                 Data type supported: Same as @p input, also could be QSYMM8_PER_CHANNEL if input is QASYMM8/QASYMM8_SIGNED.
     * @param[in]     bias             Optional, ignored if NULL. The biases have one dimension.
     *                                 Data types supported: S32 for QASYMM8/QASYMM8_SIGNED input, F32 for F32 input, F16 for F16 input.
     * @param[out]    output           Output tensor. The output has the same number of dimensions as the @p input.
     * @param[in]     info             Contains padding and policies to be used in the deconvolution, as described in @ref PadStrideInfo.
     * @param[in]     enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                                 available, which may also reduce accuracy. Default is false.
     * @param[in]     weights_info     (Optional) Specifies the weight format. Default is unspecified. This parameter can be used to specify the weight format that is optimal for
     *                                 the GEMM convolution.
     *
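     * A minimal usage sketch (illustrative only; assumes the input/weights/bias/output tensors
     * have already been initialised and allocated elsewhere, and uses placeholder stride/pad values):
     * @code
     * NEDeconvolutionLayer deconv;
     * deconv.configure(&input, &weights, &bias, &output, PadStrideInfo(2, 2, 1, 1));
     * deconv.run();
     * @endcode
     *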
     */
    void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info, bool enable_fast_math = false, const WeightsInfo &weights_info = WeightsInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEDeconvolutionLayer
     *
     * @param[in] input            Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs.
     *                             Data types supported: F32/F16/QASYMM8/QASYMM8_SIGNED.
     * @param[in] weights          The 4d weights info with dimensions [width, height, IFM, OFM].
     *                             Data type supported: Same as @p input, also could be QSYMM8_PER_CHANNEL if input is QASYMM8/QASYMM8_SIGNED.
     * @param[in] bias             (Optional) The biases have one dimension. Data types supported: S32 for QASYMM8/QASYMM8_SIGNED input, F32 for F32 input, F16 for F16 input.
     * @param[in] output           Output tensor info. The output has the same number of dimensions as the @p input.
     * @param[in] info             Contains padding and policies to be used in the deconvolution, as described in @ref PadStrideInfo.
     * @param[in] enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                             available, which may also reduce accuracy. Default is false.
     * @param[in] weights_info     (Optional) Specifies the weight format. Default is unspecified. This parameter can be used to specify the weight format that is optimal for
     *                             the GEMM convolution.
     *
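     * A hedged example of the typical pre-flight check (tensor names are illustrative and the
     * stride/pad values are placeholders):
     * @code
     * const Status s = NEDeconvolutionLayer::validate(input.info(), weights.info(), bias.info(), output.info(), PadStrideInfo(2, 2, 1, 1));
     * // Proceed with configure() only if s.error_code() == ErrorCode::OK.
     * @endcode
     *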
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &info,
                           bool enable_fast_math = false, const WeightsInfo &weights_info = WeightsInfo());

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    MemoryGroup        _memory_group;
    NEConvolutionLayer _conv_f;
    CPPUpsample        _upsample_f;
    NEReverse          _flip_weights;
    Tensor             _scaled_output;
    Tensor             _weights_flipped;
    Tensor             _flip_axis;
    const ITensor     *_original_weights;
    ITensor           *_input;
    PadStrideInfo      _info;
    bool               _is_prepared;
    bool               _do_upsampling;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEDECONVOLUTIONLAYER_H */