blob: 32c6eaa569ef2f3be17530197b8527aeca008ae1 [file] [log] [blame]
giuros01154bc1c2019-03-26 17:44:40 +00001/*
2 * Copyright (c) 2019 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__
25#define __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__
26
#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
#include "arm_compute/runtime/NEON/functions/NEReverse.h"
#include "arm_compute/runtime/NEON/functions/NESlice.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>
40
41namespace arm_compute
42{
43// Forward declarations
44class ITensor;
45
46/** Basic function to execute FFT-based convolution on NEON. This function calls the following NEON functions/kernels:
47 *
48 * -# @ref NEPermute Permute input if NHWC(only NCHW is supported).
49 * -# @ref NEPadLayer Pad input.
50 * -# @ref NEFFT2D Forward transform to the frequency domain.
51 * -# @ref NEComplexPixelWiseMultiplication Complex element-wise product of input and the weights.
52 * -# @ref NEReductionOperation Reduction across channels.
53 * -# @ref NEFFT2D Inverse transform back to the time domain.
54 * -# @ref NEStridedSlice Extract valid output.
55 * -# @ref NEArithmeticAddition Add bias.
56 * -# @ref NEActivationLayer Perform activation.
57 * -# @ref NEPermute Permute output if NHWC(only NCHW is supported).
58 */
59class NEFFTConvolutionLayer : public IFunction
60{
61public:
62 /** Default constructor */
63 NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
64 /** Prevent instances of this class from being copied (As this class contains pointers) */
65 NEFFTConvolutionLayer(const NEFFTConvolutionLayer &) = delete;
66 /** Default move constructor */
67 NEFFTConvolutionLayer(NEFFTConvolutionLayer &&) = default;
68 /** Prevent instances of this class from being copied (As this class contains pointers) */
69 NEFFTConvolutionLayer &operator=(const NEFFTConvolutionLayer &) = delete;
70 /** Default move assignment operator */
71 NEFFTConvolutionLayer &operator=(NEFFTConvolutionLayer &&) = default;
72 /** Set the input and output tensors.
73 *
74 * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout
75 *
76 * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
77 * while every optional dimension from 4 and above represent a batch of inputs.
78 * Data types supported: F32.
79 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
80 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input
81 * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
82 * Data types supported: Same as @p input.
83 * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
84 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
85 */
86 void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
87 const ActivationLayerInfo &act_info = ActivationLayerInfo());
88 /** Static function to check if given info will lead to a valid configuration of @ref NEFFTConvolutionLayer
89 *
90 * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout
91 *
92 * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
93 * while every optional dimension from 4 and above represent a batch of inputs.
94 * Data types supported: F32.
95 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
96 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input
97 * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
98 * Data types supported: Same as @p input.
99 * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
100 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
101 *
102 * @return a status
103 */
104 static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
105 const ActivationLayerInfo &act_info = ActivationLayerInfo());
106
107 // Inherited methods overridden:
108 void run() override;
109 void prepare() override;
110
111private:
112 MemoryGroup _memory_group;
113 NEReverse _flip_weights_func;
114 NEPermute _permute_input_func;
115 NEPermute _permute_output_func;
116 NEPermute _permute_weights_func;
117 NEPermute _permute_bias_func;
118 NEPadLayer _pad_input_func;
119 NEPadLayer _pad_weights_func;
120 NEFFT2D _transform_input_func;
121 std::unique_ptr<NEFFT2D> _transform_weights_func;
122 NEFFT2D _itransform_output_func;
123 NEComplexPixelWiseMultiplication _prod_func;
124 NEReductionOperation _reduce_func;
125 NESlice _extract_output_func;
126 NEArithmeticAddition _bias_add_func;
127 NEActivationLayer _activation_layer_func;
128
129 Tensor _permuted_input;
130 Tensor _permuted_weights;
131 Tensor _permuted_bias;
132 Tensor _permuted_output;
133 Tensor _padded_input;
134 Tensor _padded_weights;
135 Tensor _flip_axis;
136 Tensor _flipped_weights;
137 Tensor _transformed_input;
138 Tensor _transformed_weights;
139 Tensor _input_weights_product;
140 Tensor _output_product;
141 Tensor _output_reduced;
142 Tensor _itransformed_output;
143 Tensor _reshaped_output;
144 Tensor _bias_output;
145
146 const ITensor *_original_weights;
147 const ITensor *_original_bias;
148 bool _is_activationlayer_enabled;
149 bool _needs_permute;
150 bool _has_bias;
151 bool _is_prepared;
152};
153} // namespace arm_compute
154#endif /* __ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H__ */