blob: b3e98fc2d61e48291126a07b749ffc29f1e08639 [file] [log] [blame]
/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H
#define ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
#include "arm_compute/runtime/NEON/functions/NEReverse.h"
#include "arm_compute/runtime/NEON/functions/NESlice.h"

namespace arm_compute
{
// Forward declarations
class ITensor;

46/** Basic function to execute FFT-based convolution on NEON. This function calls the following NEON functions/kernels:
47 *
48 * -# @ref NEPermute Permute input if NHWC(only NCHW is supported).
49 * -# @ref NEPadLayer Pad input.
50 * -# @ref NEFFT2D Forward transform to the frequency domain.
51 * -# @ref NEComplexPixelWiseMultiplication Complex element-wise product of input and the weights.
52 * -# @ref NEReductionOperation Reduction across channels.
53 * -# @ref NEFFT2D Inverse transform back to the time domain.
54 * -# @ref NEStridedSlice Extract valid output.
55 * -# @ref NEArithmeticAddition Add bias.
56 * -# @ref NEActivationLayer Perform activation.
57 * -# @ref NEPermute Permute output if NHWC(only NCHW is supported).
58 */
59class NEFFTConvolutionLayer : public IFunction
60{
61public:
62 /** Default constructor */
63 NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
64 /** Prevent instances of this class from being copied (As this class contains pointers) */
65 NEFFTConvolutionLayer(const NEFFTConvolutionLayer &) = delete;
66 /** Default move constructor */
67 NEFFTConvolutionLayer(NEFFTConvolutionLayer &&) = default;
68 /** Prevent instances of this class from being copied (As this class contains pointers) */
69 NEFFTConvolutionLayer &operator=(const NEFFTConvolutionLayer &) = delete;
70 /** Default move assignment operator */
71 NEFFTConvolutionLayer &operator=(NEFFTConvolutionLayer &&) = default;
Michalis Spyrouebcebf12020-10-21 00:04:14 +010072 /** Default destructor */
73 ~NEFFTConvolutionLayer();
giuros01154bc1c2019-03-26 17:44:40 +000074 /** Set the input and output tensors.
75 *
76 * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout
77 *
78 * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
79 * while every optional dimension from 4 and above represent a batch of inputs.
80 * Data types supported: F32.
81 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
82 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input
83 * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
84 * Data types supported: Same as @p input.
85 * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
86 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
87 */
88 void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
89 const ActivationLayerInfo &act_info = ActivationLayerInfo());
90 /** Static function to check if given info will lead to a valid configuration of @ref NEFFTConvolutionLayer
91 *
92 * @note: This function only works with any square kernel size and unit strides for both NCHW and NHWC data layout
93 *
94 * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
95 * while every optional dimension from 4 and above represent a batch of inputs.
96 * Data types supported: F32.
97 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
98 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].Data type supported: Same as @p input
99 * @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
100 * Data types supported: Same as @p input.
101 * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
102 * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
103 *
104 * @return a status
105 */
106 static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
107 const ActivationLayerInfo &act_info = ActivationLayerInfo());
108
109 // Inherited methods overridden:
110 void run() override;
111 void prepare() override;
112
113private:
114 MemoryGroup _memory_group;
115 NEReverse _flip_weights_func;
116 NEPermute _permute_input_func;
117 NEPermute _permute_output_func;
118 NEPermute _permute_weights_func;
119 NEPermute _permute_bias_func;
120 NEPadLayer _pad_input_func;
121 NEPadLayer _pad_weights_func;
122 NEFFT2D _transform_input_func;
123 std::unique_ptr<NEFFT2D> _transform_weights_func;
124 NEFFT2D _itransform_output_func;
125 NEComplexPixelWiseMultiplication _prod_func;
126 NEReductionOperation _reduce_func;
127 NESlice _extract_output_func;
128 NEArithmeticAddition _bias_add_func;
129 NEActivationLayer _activation_layer_func;
130
131 Tensor _permuted_input;
132 Tensor _permuted_weights;
133 Tensor _permuted_bias;
134 Tensor _permuted_output;
135 Tensor _padded_input;
136 Tensor _padded_weights;
137 Tensor _flip_axis;
138 Tensor _flipped_weights;
139 Tensor _transformed_input;
140 Tensor _transformed_weights;
141 Tensor _input_weights_product;
142 Tensor _output_product;
143 Tensor _output_reduced;
144 Tensor _itransformed_output;
145 Tensor _reshaped_output;
146 Tensor _bias_output;
147
148 const ITensor *_original_weights;
149 const ITensor *_original_bias;
150 bool _is_activationlayer_enabled;
151 bool _needs_permute;
152 bool _has_bias;
153 bool _is_prepared;
154};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H */