/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
#include "arm_compute/runtime/IMemoryManager.h"

#include <memory>

namespace arm_compute
{
class ICLTensor;

/** Function to reshape and transpose the weights. This function calls the following kernels:
 * -# @ref CLWeightsReshapeKernel
 */
class CLConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    CLConvolutionLayerReshapeWeights();
    /** Set the input and output tensors.
     *
     * @param[in]  weights    Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                        Data type supported: QASYMM8/F16/F32.
     * @param[in]  biases     Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output     Destination tensor. Data types supported: Same as @p weights.
     * @param[in]  num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     */
    void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayerReshapeWeights
     *
     * @param[in] weights    Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                       Data type supported: QASYMM8/F16/F32.
     * @param[in] biases     Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output     Destination tensor. Data types supported: Same as @p weights.
     * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups = 1);
    // Inherited methods overridden:
    void run() override;

private:
    CLWeightsReshapeKernel _weights_reshape_kernel;
};
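
// A minimal usage sketch for CLConvolutionLayerReshapeWeights (editorial example, not part of the
// upstream documentation). It assumes the CL scheduler has already been initialised, e.g. with
// CLScheduler::get().default_init(), and that `weights`, `biases` and `weights_reshaped` are
// CLTensors whose TensorInfo (shape, data type) has already been set up:
//
//   CLConvolutionLayerReshapeWeights reshape_weights;
//
//   // Check the configuration before committing to it; validate() returns a Status
//   Status status = CLConvolutionLayerReshapeWeights::validate(weights.info(), biases.info(), weights_reshaped.info());
//
//   reshape_weights.configure(&weights, &biases, &weights_reshaped);
//
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   weights_reshaped.allocator()->allocate();
//
//   // ... fill weights and biases, then reshape them once ...
//   reshape_weights.run();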

/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
 *
 * -# @ref CLIm2ColKernel
 * -# @ref CLGEMM (if the data type is FP32 or FP16)
 * -# @ref CLGEMMLowpMatrixMultiplyCore (if the data type is QASYMM8)
 * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if the data type is QASYMM8)
 * -# @ref CLArithmeticAdditionKernel (if biases != nullptr and we have a 1x1 convolution with the NHWC data layout)
 * -# @ref CLCol2ImKernel (if NCHW data layout) or @ref CLReshapeLayer (if NHWC with QASYMM8)
 */
class CLGEMMConvolutionLayer : public IFunction
{
public:
    /** Default constructor
     *
     * @param[in] memory_manager (Optional) Memory manager.
     */
    CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMConvolutionLayer(const CLGEMMConvolutionLayer &) = delete;
    /** Default move constructor */
    CLGEMMConvolutionLayer(CLGEMMConvolutionLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMConvolutionLayer &operator=(const CLGEMMConvolutionLayer &) = delete;
    /** Default move assignment operator */
    CLGEMMConvolutionLayer &operator=(CLGEMMConvolutionLayer &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represent a batch of inputs.
     *                          Data types supported: QASYMM8/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
     *                          tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation.
     * @param[in]  num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represent a batch of inputs.
     *                          Data types supported: QASYMM8/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
     *                          tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation.
     * @param[in]  num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);
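
    // A minimal usage sketch (editorial example, not part of the upstream documentation). It assumes
    // the CL scheduler has been initialised, e.g. with CLScheduler::get().default_init(), and that
    // `src`, `weights`, `biases` and `dst` are CLTensors whose TensorInfo (shape, data type, layout)
    // has already been set up, with `conv_info` holding the desired stride and padding:
    //
    //   CLGEMMConvolutionLayer conv;
    //
    //   // Check the configuration first; validate() reports problems without touching the tensors
    //   Status status = CLGEMMConvolutionLayer::validate(src.info(), weights.info(), biases.info(), dst.info(), conv_info);
    //
    //   conv.configure(&src, &weights, &biases, &dst, conv_info);
    //
    //   src.allocator()->allocate();
    //   weights.allocator()->allocate();
    //   biases.allocator()->allocate();
    //   dst.allocator()->allocate();
    //
    //   // ... map and fill src, weights and biases ...
    //   conv.run(); // run() takes care of preparing (reshaping) the weights on first use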

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    /** Configures the appropriate matrix multiply routine
     *
     * @param[in]      input         Input tensor. Data types supported: QASYMM8/F16/F32.
     * @param[in]      weights       Weights tensor. Data type supported: Same as @p input.
     * @param[in, out] output        Output tensor. Data types supported: Same as @p input,
     *                               except for input of QASYMM8 type where output should be of S32 type.
     * @param[in]      gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
     */
    void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, int gemm_3d_depth = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
     *
     * @param[in] input         Input tensor. Data types supported: QASYMM8/F16/F32.
     * @param[in] weights       Weights tensor. Data type supported: Same as @p input.
     * @param[in] output        Output tensor. Data types supported: Same as @p input,
     *                          except for input of QASYMM8 type where output should be of S32 type.
     * @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
     * @param[in] skip_im2col   (Optional) Flag which specifies if im2col has to be skipped, i.e. for a 1x1 convolution with NHWC data layout. (Defaults to false)
     *
     * @return a status
     */
    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth = 1, bool skip_im2col = false);

private:
    CLMemoryGroup                                       _memory_group;
    CLConvolutionLayerReshapeWeights                    _reshape_weights;
    CLIm2ColKernel                                      _im2col_kernel;
    CLGEMM                                              _mm_gemm;
    CLGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
    CLCol2ImKernel                                      _col2im_kernel;
    CLActivationLayer                                   _activationlayer_function;
    CLArithmeticAdditionKernel                          _add_bias_kernel;
    CLReshapeLayer                                      _reshape_layer;

    const ICLTensor *_original_weights;

    CLTensor _im2col_output;
    CLTensor _weights_reshaped;
    CLTensor _gemm_output;
    CLTensor _tmp_output;

    DataLayout _data_layout;

    bool _append_bias;
    bool _skip_im2col;
    bool _is_quantized;
    bool _is_activationlayer_enabled;
    bool _is_prepared;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */