/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __ARM_COMPUTE_GCCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_GCCONVOLUTIONLAYER_H__

#include "arm_compute/core/GLES_COMPUTE/kernels/GCCol2ImKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCIm2ColKernel.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryGroup.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#include "arm_compute/runtime/IFunction.h"

#include <memory>

namespace arm_compute
{
class IGCTensor;

/** Function to reshape and transpose the weights. This function calls the following kernels:
 * -# @ref GCWeightsReshapeKernel
 * -# @ref GCGEMMTranspose1xWKernel
 *
 * A brief usage sketch is given after the class declaration below.
 */
class GCConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    GCConvolutionLayerReshapeWeights();
    /** Set the input and output tensors.
     *
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                          Data type supported: F16/F32.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output       Destination tensor. Data types supported: Same as @p weights.
     * @param[in]  transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
     */
    void configure(const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, bool transpose1xW);
    // Inherited methods overridden:
    void run() override;

private:
    GCWeightsReshapeKernel   _weights_reshape_kernel;
    GCGEMMTranspose1xWKernel _weights_transposed_kernel;
    GCTensor                 _weights_reshaped;
    bool                     _transpose1xW;
};
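
/* A minimal usage sketch for this function (illustrative only, not part of the library
 * documentation). The tensor names and their initialisation are assumptions made for the
 * example: the caller must set up each tensor's TensorInfo (e.g. via allocator()->init())
 * and allocate its backing memory so the shapes match the constraints described above.
 *
 *     GCTensor weights{}, biases{}, reshaped_weights{};
 *     // ... initialise the tensors' TensorInfo and allocate their memory ...
 *
 *     GCConvolutionLayerReshapeWeights reshape_weights;
 *     reshape_weights.configure(&weights, &biases, &reshaped_weights, true); // transpose1xW = true for the GEMM path
 *     reshape_weights.run();
 */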

/** Basic function to compute the convolution layer. This function calls the following GLES kernels:
 *
 * -# @ref GCWeightsReshapeKernel (executed only once for each configuration)
 * -# @ref GCGEMMTranspose1xWKernel (executed only once for each configuration)
 * -# @ref GCIm2ColKernel
 * -# @ref GCGEMMInterleave4x4Kernel
 * -# @ref GCGEMMMatrixMultiplyKernel
 * -# @ref GCCol2ImKernel
 *
 * A brief usage sketch is given after the class declaration below.
 */
class GCConvolutionLayer : public IFunction
{
public:
    /** Default constructor */
    GCConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);

    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represents a batch of inputs.
     *                          Data types supported: F16/F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with GCWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                          tensor has also been transposed with GCGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     */
    void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo());

    // Inherited methods overridden:
    void run() override;

private:
    /** Configures the appropriate matrix multiply routine
     *
     * @param[in]  input                     Input tensor. Data types supported: F16/F32.
     * @param[in]  weights                   Weights tensor. Data type supported: Same as @p input.
     * @param[out] output                    Output tensor. Data types supported: Same as @p input.
     * @param[in]  is_interleaved_transposed Flag that signals if the matrix has been interleaved and transposed.
     */
    void configure_mm(const IGCTensor *input, const IGCTensor *weights, IGCTensor *output, bool is_interleaved_transposed = true);

private:
    GCMemoryGroup                    _memory_group;
    GCConvolutionLayerReshapeWeights _reshape_weights;
    GCIm2ColKernel                   _input_im2col_kernel;
    GCGEMMInterleave4x4Kernel        _input_interleave_kernel;
    GCGEMMMatrixMultiplyKernel       _mm_kernel;
    GCCol2ImKernel                   _output_col2im_kernel;
    GCFillBorderKernel               _fill_border;

    GCTensor _input_im2col_reshaped;
    GCTensor _input_interleaved_reshaped;
    GCTensor _weights_reshaped;
    GCTensor _weights_transposed;
    GCTensor _gemm_output;
    GCTensor _tmp_output;

    bool _append_bias;
    bool _is_fully_connected_convolution;
    bool _are_weights_reshaped;
};
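
/* A minimal usage sketch for the convolution function (illustrative only; the tensor names,
 * shapes and padding/stride values are assumptions for the example). The caller is
 * responsible for initialising the GLES compute backend (e.g. GCScheduler::get().default_init())
 * and for initialising and allocating every tensor before calling run().
 *
 *     GCTensor src{}, weights{}, biases{}, dst{};
 *     // ... set up the TensorInfo of src [W, H, IFM], weights [Kx, Ky, IFM, OFM],
 *     //     biases [OFM] and dst [W', H', OFM], and allocate them before run() ...
 *
 *     GCConvolutionLayer conv;
 *     conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1)); // stride (1, 1), padding (1, 1)
 *     conv.run();
 */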
}

#endif /* __ARM_COMPUTE_GCCONVOLUTIONLAYER_H__ */