/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H
#define ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/ITransformWeights.h"
#include "arm_compute/runtime/IWeightsManager.h"
#include "arm_compute/runtime/MemoryGroup.h"

#include <memory>

namespace arm_compute
{
class CLCol2ImKernel;
class CLIm2ColKernel;
class CLWeightsReshapeKernel;
class ICLTensor;

/** Function to reshape and transpose the weights. This function calls the following kernels:
 * -# @ref CLWeightsReshapeKernel
 */
class CLConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    CLConvolutionLayerReshapeWeights();
    /** Prevent instances of this class from being copied */
    CLConvolutionLayerReshapeWeights(const CLConvolutionLayerReshapeWeights &) = delete;
    /** Prevent instances of this class from being copied */
    CLConvolutionLayerReshapeWeights &operator=(const CLConvolutionLayerReshapeWeights &) = delete;
    /** Default move constructor */
    CLConvolutionLayerReshapeWeights(CLConvolutionLayerReshapeWeights &&) = default;
    /** Default move assignment operator */
    CLConvolutionLayerReshapeWeights &operator=(CLConvolutionLayerReshapeWeights &&) = default;
    /** Default destructor */
    ~CLConvolutionLayerReshapeWeights();
    /** Set the input and output tensors.
     *
     * @param[in]  weights    Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                        Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
     * @param[in]  biases     Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output     Destination tensor. Data types supported: Same as @p weights.
     * @param[in]  num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     */
    void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
    /** Set the input and output tensors.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  weights         Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                             Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
     * @param[in]  biases          Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output          Destination tensor. Data types supported: Same as @p weights.
     * @param[in]  num_groups      (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayerReshapeWeights
     *
     * @param[in] weights    Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                       Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
     * @param[in] biases     Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output     Destination tensor. Data types supported: Same as @p weights.
     * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups = 1);
    // Inherited methods overridden:
    void run() override;

private:
    std::unique_ptr<CLWeightsReshapeKernel> _weights_reshape_kernel;
};

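// A minimal usage sketch for CLConvolutionLayerReshapeWeights (illustrative only,
// not part of the library's documentation). The function is normally driven
// internally by CLGEMMConvolutionLayer, but it can also be configured directly.
// The shapes/data types below and the assumption that the destination tensor info
// is deduced during configure() are example choices made here, not API guarantees;
// an initialised CL runtime (e.g. CLScheduler::get().default_init()) is assumed.
//
//   CLTensor weights, biases, weights_reshaped;
//   weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32)); // [kernel_x, kernel_y, IFM, OFM]
//   biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));               // [OFM]
//
//   CLConvolutionLayerReshapeWeights reshape_weights;
//   reshape_weights.configure(&weights, &biases, &weights_reshaped);
//
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   weights_reshaped.allocator()->allocate();
//   // ... fill weights and biases, then:
//   reshape_weights.run();
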
namespace weights_transformations
{
/** Basic function to manage the reshape weights generated from @ref CLConvolutionLayerReshapeWeights */
class CLConvolutionLayerReshapeWeightsTransform : public ITransformWeights
{
public:
    /** Configures the @ref CLConvolutionLayerReshapeWeights function
     *
     * @param[in] input      Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
     * @param[in] biases     Biases tensor. Data type supported: same as @p input, S32 if @p input is quantized.
     * @param[in] num_groups Number of groups when performing a grouped convolution.
     */
    void configure(const ICLTensor *input, const ICLTensor *biases, unsigned int num_groups)
    {
        configure(CLKernelLibrary::get().get_compile_context(), input, biases, num_groups);
    }
    /** Configures the @ref CLConvolutionLayerReshapeWeights function
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in] input           Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
     * @param[in] biases          Biases tensor. Data type supported: same as @p input, S32 if @p input is quantized.
     * @param[in] num_groups      Number of groups when performing a grouped convolution.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, unsigned int num_groups)
    {
        _bias_bit   = (biases != nullptr) ? 1 : 0;
        _num_groups = num_groups;
        _func.configure(compile_context, input, biases, &_output, num_groups);
    }

    // Inherited method override
    void run() override
    {
        _output.allocator()->allocate();
        _func.run();
        _reshape_run = true;
    }

    // Inherited method override
    ICLTensor *get_weights() override
    {
        return &_output;
    }

    // Inherited method override
    void release() override
    {
        _output.allocator()->free();
    }

    // Inherited method override
    uint32_t uid() override
    {
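        // The uid packs this transform's identity into a single value: 0x9 is the
        // base id of this reshape transformation, bit 7 records whether a bias
        // tensor was provided and bits 8 and above hold the group count, so
        // configurations differing in either field map to different uids.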
        return ((0x9) | (_bias_bit << 7) | (_num_groups << 8));
    }

private:
    CLTensor                         _output{};
    CLConvolutionLayerReshapeWeights _func{};
    int32_t                          _bias_bit{ 0 };
    unsigned int                     _num_groups{ 0 };
};
} // namespace weights_transformations

/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
 *
 * -# @ref CLIm2ColKernel
 * -# @ref CLGEMM (if the data type is FP32 or FP16)
 * -# @ref CLGEMMLowpMatrixMultiplyCore (if the data type is QASYMM8/QASYMM8_SIGNED)
 * -# @ref CLGEMMLowpOutputStage with QUANTIZE_DOWN_FIXEDPOINT type of quantization (if the data type is QASYMM8/QASYMM8_SIGNED)
 * -# @ref CLCol2ImKernel (if NCHW data layout)
 */
class CLGEMMConvolutionLayer : public IFunction
{
public:
    /** Constructor
     *
     * @param[in] memory_manager  (Optional) Memory manager.
     * @param[in] weights_manager (Optional) Weights manager.
     */
    CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMConvolutionLayer(const CLGEMMConvolutionLayer &) = delete;
    /** Default move constructor */
    CLGEMMConvolutionLayer(CLGEMMConvolutionLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMConvolutionLayer &operator=(const CLGEMMConvolutionLayer &) = delete;
    /** Default move assignment operator */
    CLGEMMConvolutionLayer &operator=(CLGEMMConvolutionLayer &&) = default;
    /** Default destructor */
    ~CLGEMMConvolutionLayer();
    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represent a batch of inputs.
     *                          Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                          Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
     *                          tensor has also been transposed with CLGEMMReshapeRHSMatrixKernel. Data type supported: Same as @p input.
     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation.
     * @param[in]  num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);
    /** Set the input and output tensors.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represent a batch of inputs.
     *                             Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]  weights         Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                             Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
     * @param[in]  biases          Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                             Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
     * @param[out] output          Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in]  conv_info       Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info    Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
     *                             tensor has also been transposed with CLGEMMReshapeRHSMatrixKernel. Data type supported: Same as @p input.
     * @param[in]  dilation        (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info        (Optional) Activation layer information in case of a fused activation.
     * @param[in]  num_groups      (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                   const WeightsInfo &weights_info = WeightsInfo(),
                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represent a batch of inputs.
     *                          Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                          Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights
     *                          tensor has also been transposed with CLGEMMReshapeRHSMatrixKernel. Data type supported: Same as @p input.
     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation.
     * @param[in]  num_groups   (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    /** Configures the appropriate matrix multiply routine
     *
     * @param[in]      compile_context       The compile context to be used.
     * @param[in]      input                 Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]      weights               Weights tensor. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or
     *                                       QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
     * @param[in]      biases                Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                                       Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
     * @param[in, out] output                Output tensor. Data types supported: same as @p input.
     * @param[in]      gemmlowp_output_stage GEMMLowp output stage info
     * @param[in]      gemm_3d_depth         Depth of GEMM 3D
     * @param[in]      act_info              Activation to apply after the matrix multiplication
     */
    void configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                      const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                      int gemm_3d_depth, const ActivationLayerInfo &act_info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
     *
     * @param[in] input                 Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in] weights               Weights tensor info. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or
     *                                  QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
     * @param[in] biases                Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                                  Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
     * @param[in] output                Output tensor info. Data types supported: same as @p input.
     * @param[in] gemmlowp_output_stage GEMMLowp output stage info
     * @param[in] gemm_3d_depth         Depth of GEMM 3D
     * @param[in] skip_im2col           Flag which specifies if im2col has to be skipped, e.g. for a 1x1 convolution with NHWC data layout.
     * @param[in] act_info              Activation to apply after the matrix multiplication
     *
     * @return a status
     */
    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                              int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info);

private:
    MemoryGroup                                                         _memory_group;
    IWeightsManager                                                    *_weights_manager;
    CLConvolutionLayerReshapeWeights                                    _reshape_weights;
    weights_transformations::CLConvolutionLayerReshapeWeightsTransform  _reshape_weights_managed;
    std::unique_ptr<CLIm2ColKernel>                                     _im2col_kernel;
    CLGEMM                                                              _mm_gemm;
    CLGEMMLowpMatrixMultiplyCore                                        _mm_gemmlowp;
    std::unique_ptr<CLCol2ImKernel>                                     _col2im_kernel;
    CLActivationLayer                                                   _activationlayer_function;

    const ICLTensor *_original_weights;

    CLTensor _im2col_output;
    CLTensor _weights_reshaped;
    CLTensor _gemm_output;

    bool _skip_im2col;
    bool _skip_col2im;
    bool _is_quantized;
    bool _fuse_activation;
    bool _is_prepared;
};
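
// A minimal end-to-end usage sketch for CLGEMMConvolutionLayer (illustrative only,
// not part of the library's documentation). It assumes the CL runtime has already
// been set up, e.g. with CLScheduler::get().default_init(); the shapes, data types
// and PadStrideInfo values are example choices made here: a 3x3 FP32 convolution
// with stride 1 and padding 1 on a 32x32x16 input producing a 32x32x8 output.
//
//   CLTensor src, weights, biases, dst;
//   src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32)); // [kernel_x, kernel_y, IFM, OFM]
//   biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));               // [OFM]
//   dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));
//
//   const PadStrideInfo conv_info(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */);
//
//   // Optionally check the configuration before committing to it.
//   const Status status = CLGEMMConvolutionLayer::validate(src.info(), weights.info(), biases.info(), dst.info(), conv_info);
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, conv_info);
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   dst.allocator()->allocate();
//
//   // ... map/fill src, weights and biases, then:
//   conv.run(); // run() takes care of the one-off prepare() work the first time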
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H */