/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h"
#include "arm_compute/core/NEON/kernels/NECol2ImKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h"
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;

/** Function to reshape the weights. This function calls the following kernel:
 * -# @ref NEWeightsReshapeKernel
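 *
 * A minimal usage sketch; the kernel size, feature-map counts and FP32 data
 * type below are illustrative assumptions, not requirements of this function:
 *
 * @code
 * // 3x3 kernels, 16 input feature maps, 32 output feature maps
 * Tensor weights{};
 * Tensor reshaped_weights{};
 * weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 32U), 1, DataType::F32));
 *
 * NEConvolutionLayerReshapeWeights reshape_weights;
 * reshape_weights.configure(&weights, nullptr, &reshaped_weights); // output shape is inferred
 *
 * weights.allocator()->allocate();
 * reshaped_weights.allocator()->allocate();
 * // ... fill the weights tensor ...
 * reshape_weights.run();
 * @endcode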
 */
class NEConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    NEConvolutionLayerReshapeWeights();
    /** Set the input and output tensors.
     *
     * @param[in]  weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QASYMM8/F16/F32.
     * @param[in]  biases  Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output  Destination tensor. Data types supported: Same as @p weights.
     */
    void configure(const ITensor *weights, const ITensor *biases, ITensor *output);
    /** Static function to check if given info will lead to a valid configuration of @ref NEConvolutionLayerReshapeWeights
     *
     * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QASYMM8/F16/F32.
     * @param[in] biases  Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output  Destination tensor. Data types supported: Same as @p weights.
     *
     * @return an error status
     */
    static Status validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output);

    // Inherited methods overridden:
    void run() override;

private:
    NEWeightsReshapeKernel _weights_reshape_kernel;
};

/** Basic function to compute the convolution layer. This function calls the following NEON kernels/functions:
 *
 * -# @ref NEIm2ColKernel
 * -# @ref NEGEMM (if the data type is FP32 or FP16)
 * -# @ref NEGEMMLowpMatrixMultiplyCore (if the data type is QASYMM8)
 * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if the data type is QASYMM8)
 * -# @ref NEArithmeticAdditionKernel (if biases != nullptr and we have a 1x1 convolution with the NHWC data layout)
 * -# @ref NECol2ImKernel
 *
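 * A minimal end-to-end sketch, assuming NCHW FP32 tensors; the shapes and the
 * same-padded 3x3 configuration below are illustrative assumptions:
 *
 * @code
 * Tensor src{}, weights{}, biases{}, dst{};
 * src.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));
 * weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
 * biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
 * dst.allocator()->init(TensorInfo(TensorShape(224U, 224U, 16U), 1, DataType::F32));
 *
 * NEGEMMConvolutionLayer conv;
 * conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1)); // stride 1, pad 1
 *
 * src.allocator()->allocate();
 * weights.allocator()->allocate();
 * biases.allocator()->allocate();
 * dst.allocator()->allocate();
 * // ... fill src, weights and biases ...
 * conv.run();
 * @endcode
 *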
 */
class NEGEMMConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMConvolutionLayer(const NEGEMMConvolutionLayer &) = delete;
    /** Default move constructor */
    NEGEMMConvolutionLayer(NEGEMMConvolutionLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMConvolutionLayer &operator=(const NEGEMMConvolutionLayer &) = delete;
    /** Default move assignment operator */
    NEGEMMConvolutionLayer &operator=(NEGEMMConvolutionLayer &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represent a batch of inputs.
     *                          Data types supported: QASYMM8/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
     *                          tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
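     *
     * For example, a 3x3 convolution with a dilation of 2 and a fused bounded ReLU
     * could be configured as below (a sketch; the tensor names are illustrative):
     *
     * @code
     * conv.configure(&src, &weights, &biases, &dst,
     *                PadStrideInfo(1, 1, 2, 2), WeightsInfo(), Size2D(2U, 2U),
     *                ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
     * @endcode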
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMConvolutionLayer
     *
     * @param[in] input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                         while every optional dimension from 4 and above represent a batch of inputs.
     *                         Data types supported: QASYMM8/F16/F32.
     * @param[in] weights      Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in] biases       Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
     *                         Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[in] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                         Data types supported: Same as @p input.
     * @param[in] conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights
     *                         tensor has also been transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in] dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in] act_info     (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
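     *
     * Typical use is to check the tensor metadata before calling configure(), e.g.
     * (a sketch, assuming tensors initialized as in the configure() example above):
     *
     * @code
     * Status status = NEGEMMConvolutionLayer::validate(src.info(), weights.info(), biases.info(),
     *                                                  dst.info(), PadStrideInfo(1, 1, 1, 1));
     * if(status.error_code() == ErrorCode::OK)
     * {
     *     conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
     * }
     * @endcode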
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    /** Configures the appropriate matrix multiply routine
     *
     * @param[in]  input         Input tensor. Data types supported: QASYMM8/F16/F32.
     * @param[in]  weights       Weights tensor. Data type supported: Same as @p input.
     * @param[out] output        Output tensor. Data types supported: Same as @p input,
     *                           except for input of QASYMM8 type where output should be of S32 type.
     * @param[in]  gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
     */
    void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, int gemm_3d_depth = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMConvolutionLayer matrix multiply routines
     *
     * @param[in] input         Input tensor. Data types supported: QASYMM8/F16/F32.
     * @param[in] weights       Weights tensor. Data type supported: Same as @p input.
     * @param[in] output        Output tensor. Data types supported: Same as @p input,
     *                          except for input of QASYMM8 type where output should be of S32 type.
     * @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
     * @param[in] skip_im2col   (Optional) Flag which specifies if im2col has to be skipped, i.e. for a 1x1 convolution with the NHWC data layout. (Defaults to false)
     *
     * @return a status
     */
    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth = 1, bool skip_im2col = false);

private:
    MemoryGroup                                         _memory_group;
    NEConvolutionLayerReshapeWeights                    _reshape_weights;
    NEIm2ColKernel                                      _im2col_kernel;
    NEGEMM                                              _mm_gemm;
    NEGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
    NECol2ImKernel                                      _col2im_kernel;
    NEActivationLayer                                   _activationlayer_function;
    NEArithmeticAdditionKernel                          _add_bias_kernel;

    const ITensor *_original_weights;

    Tensor _im2col_output;
    Tensor _weights_reshaped;
    Tensor _gemm_output;
    Tensor _tmp_output;

    DataLayout _data_layout;

    bool _append_bias;
    bool _skip_im2col;
    bool _skip_col2im;
    bool _is_quantized;
    bool _is_activationlayer_enabled;
    bool _is_prepared;
};
}
#endif /* __ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H__ */