/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "arm_compute/runtime/IMemoryManager.h"

#include <memory>

namespace arm_compute
{
class ICLTensor;

/** Function to reshape and transpose the weights. This function calls the following kernels:
 * -# @ref CLWeightsReshapeKernel
 * -# @ref CLGEMMTranspose1xWKernel
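 *
 * A minimal usage sketch (illustrative only: tensor info initialisation, memory allocation and
 * CLScheduler setup are assumed to happen elsewhere and are omitted here):
 *
 * @code
 * CLTensor weights;          // 4D weights tensor [kernel_x, kernel_y, IFM, OFM]
 * CLTensor biases;           // 1D biases tensor [OFM]
 * CLTensor reshaped_weights; // destination tensor
 * // ... initialise the tensors' info and allocate their backing memory ...
 *
 * CLConvolutionLayerReshapeWeights reshape_weights;
 * reshape_weights.configure(&weights, &biases, &reshaped_weights, true); // transpose1xW = true for the GEMM path
 * reshape_weights.run();
 * @endcode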
 */
class CLConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Set the input and output tensors.
     *
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                          Data type supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output       Destination tensor. Data types supported: Same as @p weights.
     * @param[in]  transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
     */
    void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW);
    // Inherited methods overridden:
    void run() override;

private:
    CLMemoryGroup            _memory_group;
    CLWeightsReshapeKernel   _weights_reshape_kernel;
    CLGEMMTranspose1xWKernel _weights_transposed_kernel;
    CLTensor                 _weights_reshaped;
    bool                     _transpose1xW;
};

/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
 *
 * Note: passing weights that have already been reshaped is not supported for the quantized asymmetric (QASYMM8) data type.
 *
 * -# @ref CLIm2ColKernel
 * -# @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
 * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if quantized asymmetric)
 * -# @ref CLCol2ImKernel
 *
 * If the weights are already reshaped:
 *    -# @ref CLGEMMInterleave4x4Kernel
 *    -# @ref CLGEMMMatrixMultiplyKernel
 * otherwise:
 *    -# @ref CLGEMM
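 *
 * A minimal usage sketch (illustrative only: the shapes shown are examples, and tensor
 * allocation plus CLScheduler initialisation are assumed to happen elsewhere):
 *
 * @code
 * CLTensor src;     // input  [width, height, IFM]
 * CLTensor weights; // 4D weights [kernel_x, kernel_y, IFM, OFM]
 * CLTensor biases;  // 1D biases [OFM]
 * CLTensor dst;     // output [conv_width, conv_height, OFM]
 * // ... initialise the tensors' info and allocate their backing memory ...
 *
 * CLConvolutionLayer conv;
 * conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0)); // stride 1x1, no padding
 * conv.run();
 * @endcode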
 */
class CLConvolutionLayer : public IFunction
{
public:
    /** Default constructor */
    CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represents a batch of inputs.
     *                          Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                          tensor has also been transposed with CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo());

    // Inherited methods overridden:
    void run() override;

private:
    /** Configures the appropriate matrix multiply routine
     *
     * @param input                     Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param weights                   Weights tensor. Data type supported: Same as @p input.
     * @param output                    Output tensor. Data types supported: Same as @p input,
     *                                  except for input of QASYMM8 type where output should be of S32 type.
     * @param is_interleaved_transposed Flag that signals if the matrix has been interleaved and transposed (reshaped GEMM path)
     * @param are_weights_reshaped      Flag that signals if the weights have already been reshaped
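     *
     * A sketch of the selection this routine is expected to make, following the kernel list in the
     * class documentation above (illustrative assumption only, not the actual implementation):
     *
     * @code
     * if(_is_quantized)
     * {
     *     // quantized asymmetric path: configure _mm_gemmlowp (with _gemmlowp_output_stage applied afterwards)
     * }
     * else if(is_interleaved_transposed)
     * {
     *     // weights already reshaped and transposed: configure _mm_kernel directly
     * }
     * else
     * {
     *     // generic path: configure _mm_gemm (CLGEMM)
     * }
     * @endcode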
     */
    void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed, bool are_weights_reshaped);

private:
    CLMemoryGroup                                       _memory_group;
    CLConvolutionLayerReshapeWeights                    _reshape_weights;
    CLIm2ColKernel                                      _im2col_kernel;
    CLGEMMInterleave4x4Kernel                           _interleave_kernel;
    CLGEMMMatrixMultiplyKernel                          _mm_kernel;
    CLGEMM                                              _mm_gemm;
    CLGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
    CLCol2ImKernel                                      _col2im_kernel;

    CLTensor _im2col_output;
    CLTensor _interleave_output;
    CLTensor _weights_reshaped;
    CLTensor _weights_transposed;
    CLTensor _gemm_output;
    CLTensor _tmp_output;

    bool _are_weights_reshaped;
    bool _is_quantized;
    bool _is_interleaved_transposed;
};
}
#endif /* __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__ */