/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "arm_compute/runtime/IMemoryManager.h"

#include <memory>

namespace arm_compute
{
class ICLTensor;

/** Function to reshape and transpose the weights. This function calls the following kernels:
 * -# @ref CLWeightsReshapeKernel
 *
 * A usage sketch is provided after the class definition.
 */
class CLConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    CLConvolutionLayerReshapeWeights();
    /** Set the input and output tensors.
     *
     * @param[in]  weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                     Data type supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in]  biases  Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output  Destination tensor. Data types supported: Same as @p weights.
     */
    void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output);
    /** Static function to check if the given info will lead to a valid configuration of @ref CLConvolutionLayerReshapeWeights
     *
     * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                    Data type supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in] biases  Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output  Destination tensor. Data types supported: Same as @p weights.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output);
    // Inherited methods overridden:
    void run() override;

private:
    CLWeightsReshapeKernel _weights_reshape_kernel;
};
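
/* Usage sketch (illustrative only, not part of the library documentation): the tensor shapes,
 * data type and CL initialisation below are assumptions chosen for this example. In practice this
 * function is normally invoked internally by CLGEMMConvolutionLayer rather than used directly.
 *
 *     CLScheduler::get().default_init();
 *
 *     CLTensor weights;
 *     CLTensor biases;
 *     CLTensor weights_reshaped;
 *     // Hypothetical 3x3 kernel, 3 input feature maps (IFM), 16 output feature maps (OFM)
 *     weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
 *     biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
 *
 *     CLConvolutionLayerReshapeWeights reshape_weights;
 *     reshape_weights.configure(&weights, &biases, &weights_reshaped);
 *
 *     weights.allocator()->allocate();
 *     biases.allocator()->allocate();
 *     weights_reshaped.allocator()->allocate();
 *     // ... fill weights and biases, then:
 *     reshape_weights.run();
 */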

/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
 *
 * Note: passing weights that have already been reshaped is not supported for quantized asymmetric data types
 *
 * -# @ref CLIm2ColKernel
 * -# @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
 * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if quantized asymmetric)
 * -# @ref CLCol2ImKernel
 *
 * If the weights are already reshaped:
 * -# @ref CLGEMMInterleave4x4Kernel
 * -# @ref CLGEMMMatrixMultiplyKernel
 * otherwise:
 * -# @ref CLGEMM
 *
 * A usage sketch is provided after the class definition.
 */
class CLGEMMConvolutionLayer : public IFunction
{
public:
    /** Default constructor
     *
     * @param[in] memory_manager (Optional) Memory manager.
     */
    CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMConvolutionLayer(const CLGEMMConvolutionLayer &) = delete;
    /** Default move constructor */
    CLGEMMConvolutionLayer(CLGEMMConvolutionLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMConvolutionLayer &operator=(const CLGEMMConvolutionLayer &) = delete;
    /** Default move assignment operator */
    CLGEMMConvolutionLayer &operator=(CLGEMMConvolutionLayer &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represents a batch of inputs.
     *                          Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                          tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
                   const Size2D &dilation = Size2D(1U, 1U));
    /** Static function to check if the given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represents a batch of inputs.
     *                          Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                          tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U));

    // Inherited methods overridden:
    void run() override;

private:
    /** Configures the appropriate matrix multiply routine
     *
     * @param[in]  input   Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in]  weights Weights tensor. Data type supported: Same as @p input.
     * @param[out] output  Output tensor. Data types supported: Same as @p input,
     *                     except for input of QASYMM8 type where output should be of S32 type.
     */
    void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output);
    /** Static function to check if the given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
     *
     * @param[in] input   Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in] weights Weights tensor. Data type supported: Same as @p input.
     * @param[in] output  Output tensor. Data types supported: Same as @p input,
     *                    except for input of QASYMM8 type where output should be of S32 type.
     *
     * @return a status
     */
    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output);

private:
    CLMemoryGroup                                       _memory_group;
    CLConvolutionLayerReshapeWeights                    _reshape_weights;
    CLIm2ColKernel                                      _im2col_kernel;
    CLGEMM                                              _mm_gemm;
    CLGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
    CLCol2ImKernel                                      _col2im_kernel;

    const ICLTensor *_original_weights;

    CLTensor _im2col_output;
    CLTensor _weights_reshaped;
    CLTensor _gemm_output;
    CLTensor _tmp_output;

    bool _is_quantized;
    bool _is_first_run;
};
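
/* Usage sketch (illustrative only, not part of the library documentation): the shapes, data type
 * and convolution parameters below are assumptions chosen for this example, and a CL context is
 * assumed to be initialised first, e.g. with CLScheduler::get().default_init().
 *
 *     CLTensor src;
 *     CLTensor weights;
 *     CLTensor biases;
 *     CLTensor dst;
 *
 *     // Hypothetical shapes: 64x64 input with 3 channels, 3x3 kernel, 16 output feature maps.
 *     // With stride 1 and padding 1 the spatial dimensions are preserved.
 *     src.allocator()->init(TensorInfo(TensorShape(64U, 64U, 3U), 1, DataType::F32));
 *     weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
 *     biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
 *     dst.allocator()->init(TensorInfo(TensorShape(64U, 64U, 16U), 1, DataType::F32));
 *
 *     const PadStrideInfo conv_info(1, 1, 1, 1);
 *
 *     // Optionally check the configuration up front
 *     Status status = CLGEMMConvolutionLayer::validate(src.info(), weights.info(), biases.info(), dst.info(), conv_info);
 *
 *     CLGEMMConvolutionLayer conv;
 *     conv.configure(&src, &weights, &biases, &dst, conv_info);
 *
 *     src.allocator()->allocate();
 *     weights.allocator()->allocate();
 *     biases.allocator()->allocate();
 *     dst.allocator()->allocate();
 *
 *     // ... fill src, weights and biases, then:
 *     conv.run();
 */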
} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */