/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLFFTCONVOLUTIONLAYER_H
#define ARM_COMPUTE_CLFFTCONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
#include "arm_compute/runtime/CL/functions/CLPermute.h"
#include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"
#include "arm_compute/runtime/CL/functions/CLReductionOperation.h"
#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
#include "arm_compute/runtime/CL/functions/CLReverse.h"
#include "arm_compute/runtime/CL/functions/CLSlice.h"

namespace arm_compute
{
// Forward declarations
class ICLTensor;

/** Basic function to execute FFT-based convolution on OpenCL. This function calls the following OpenCL functions/kernels:
 *
 * -# @ref CLPermute                        Permute input if NHWC (only NCHW is supported internally).
 * -# @ref CLPadLayer                       Pad input.
 * -# @ref CLFFT2D                          Forward transform to the frequency domain.
 * -# @ref CLComplexPixelWiseMultiplication Complex element-wise product of input and the weights.
 * -# @ref CLReductionOperation             Reduction across channels.
 * -# @ref CLFFT2D                          Inverse transform back to the spatial domain.
 * -# @ref CLStridedSlice                   Extract valid output.
 * -# @ref CLArithmeticAddition             Add bias.
 * -# @ref CLActivationLayer                Perform activation.
 * -# @ref CLPermute                        Permute output if NHWC (only NCHW is supported internally).
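 *
 * A minimal usage sketch is shown below. It is illustrative only: the tensor shapes, the "same"
 * padding of a 5x5 kernel and the scheduler setup are assumptions made for the example, not
 * requirements of the function.
 * @code
 * // Initialise the default OpenCL scheduler/context
 * CLScheduler::get().default_init();
 *
 * // NCHW F32 tensors: 64x64 input with 3 channels, 5x5 kernel, 16 output feature maps
 * CLTensor src, weights, biases, dst;
 * src.allocator()->init(TensorInfo(TensorShape(64U, 64U, 3U), 1, DataType::F32));
 * weights.allocator()->init(TensorInfo(TensorShape(5U, 5U, 3U, 16U), 1, DataType::F32));
 * biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
 * dst.allocator()->init(TensorInfo(TensorShape(64U, 64U, 16U), 1, DataType::F32));
 *
 * // Unit strides with padding of 2 on each side keep the 64x64 spatial size
 * CLFFTConvolutionLayer conv;
 * conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 2, 2));
 *
 * src.allocator()->allocate();
 * weights.allocator()->allocate();
 * biases.allocator()->allocate();
 * dst.allocator()->allocate();
 *
 * // ... map and fill src, weights and biases ...
 * conv.run();
 * @endcode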
 */
class CLFFTConvolutionLayer : public IFunction
{
public:
    /** Default constructor */
    CLFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLFFTConvolutionLayer(const CLFFTConvolutionLayer &) = delete;
    /** Default move constructor */
    CLFFTConvolutionLayer(CLFFTConvolutionLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLFFTConvolutionLayer &operator=(const CLFFTConvolutionLayer &) = delete;
    /** Default move assignment operator */
    CLFFTConvolutionLayer &operator=(CLFFTConvolutionLayer &&) = default;
    /** Set the input and output tensors.
     *
     * @note This function only works with square kernel sizes and unit strides, for both NCHW and NHWC data layouts.
     *
     * @param[in]  input     Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                       while every optional dimension from 4 and above represents a batch of inputs.
     *                       Data types supported: F32.
     * @param[in]  weights   Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases    Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output    Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                       Data types supported: Same as @p input.
     * @param[in]  conv_info Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  act_info  (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Set the input and output tensors.
     *
     * @note This function only works with square kernel sizes and unit strides, for both NCHW and NHWC data layouts.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represents a batch of inputs.
     *                             Data types supported: F32.
     * @param[in]  weights         Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases          Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in]  conv_info       Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  act_info        (Optional) Activation layer information in case of a fused activation.
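     *
     * When the caller does not manage an OpenCL compile context of its own, one option (an
     * assumption about the calling code, not a requirement of this overload) is to pass the
     * library-wide default, reusing the tensors and function object from the sketch in the
     * class description:
     * @code
     * // Reuse the compile context owned by the kernel library
     * conv.configure(CLKernelLibrary::get().get_compile_context(), &src, &weights, &biases, &dst,
     *                PadStrideInfo(1, 1, 2, 2));
     * @endcode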
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLFFTConvolutionLayer
     *
     * @note This function only works with square kernel sizes and unit strides, for both NCHW and NHWC data layouts.
     *
     * @param[in] input     Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                      while every optional dimension from 4 and above represents a batch of inputs.
     *                      Data types supported: F32.
     * @param[in] weights   Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in] biases    Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output    Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                      Data types supported: Same as @p input.
     * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] act_info  (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
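     *
     * A typical use is to check a candidate configuration before any tensors are created, for
     * example (the shapes below are assumptions made for illustration):
     * @code
     * const TensorInfo src_info(TensorShape(64U, 64U, 3U), 1, DataType::F32);
     * const TensorInfo weights_info(TensorShape(5U, 5U, 3U, 16U), 1, DataType::F32);
     * const TensorInfo biases_info(TensorShape(16U), 1, DataType::F32);
     * const TensorInfo dst_info(TensorShape(64U, 64U, 16U), 1, DataType::F32);
     *
     * const Status status = CLFFTConvolutionLayer::validate(&src_info, &weights_info, &biases_info,
     *                                                       &dst_info, PadStrideInfo(1, 1, 2, 2));
     * if(!bool(status))
     * {
     *     // Fall back to another convolution method, e.g. @ref CLConvolutionLayer
     * }
     * @endcode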
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    MemoryGroup                      _memory_group;
    CLReverse                        _flip_weights_func;
    CLPermute                        _permute_input_func;
    CLPermute                        _permute_output_func;
    CLPermute                        _permute_weights_func;
    CLPermute                        _permute_bias_func;
    CLPadLayer                       _pad_input_func;
    CLPadLayer                       _pad_weights_func;
    CLFFT2D                          _transform_input_func;
    std::unique_ptr<CLFFT2D>         _transform_weights_func;
    CLFFT2D                          _itransform_output_func;
    CLComplexPixelWiseMultiplication _prod_func;
    CLReductionOperation             _reduce_func;
    CLSlice                          _extract_output_func;
    CLArithmeticAddition             _bias_add_func;
    CLActivationLayer                _activation_layer_func;

    CLTensor _permuted_input;
    CLTensor _permuted_weights;
    CLTensor _permuted_bias;
    CLTensor _permuted_output;
    CLTensor _padded_input;
    CLTensor _padded_weights;
    CLTensor _flip_axis;
    CLTensor _flipped_weights;
    CLTensor _transformed_input;
    CLTensor _transformed_weights;
    CLTensor _input_weights_product;
    CLTensor _output_product;
    CLTensor _output_reduced;
    CLTensor _itransformed_output;
    CLTensor _reshaped_output;
    CLTensor _bias_output;

    const ICLTensor *_original_weights;
    const ICLTensor *_original_bias;
    bool             _is_activationlayer_enabled;
    bool             _needs_permute;
    bool             _has_bias;
    bool             _is_prepared;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLFFTCONVOLUTIONLAYER_H */