/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"

namespace arm_compute
{
class ICLTensor;

/** Function to reshape and transpose the weights. This function calls the following kernels:
 * -# @ref CLWeightsReshapeKernel
 * -# @ref CLGEMMTranspose1xWKernel
 */
class CLConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    CLConvolutionLayerReshapeWeights();
    /** Set the input and output tensors.
     *
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: F32.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output       Destination tensor. Data types supported: Same as @p weights.
     * @param[in]  transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
     */
    void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose1xW);
    // Inherited methods overridden:
    void run() override;

private:
    CLWeightsReshapeKernel _weights_reshape_kernel;
    CLGEMMTranspose1xWKernel _weights_transposed_kernel;
    CLTensor _weights_reshaped;
    bool _transpose1xW;
};
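
// A minimal usage sketch for CLConvolutionLayerReshapeWeights (illustrative only: the
// tensor names below are hypothetical, and their creation, allocation and the OpenCL
// runtime initialisation are assumed to be handled elsewhere by the application):
//
//     CLConvolutionLayerReshapeWeights reshape_weights;
//     // Reshape the [kernel_x, kernel_y, IFM, OFM] weights and apply the 1xW transposition
//     // used by the GEMM-based convolution path.
//     reshape_weights.configure(&weights, &biases, &weights_reshaped, true /* transpose1xW */);
//     reshape_weights.run();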

/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels:
 *
 * -# @ref CLWeightsReshapeKernel (executed only once for each configuration)
 * -# @ref CLGEMMTranspose1xWKernel (executed only once for each configuration)
 * -# @ref CLIm2ColKernel
 * -# @ref CLGEMMInterleave4x4Kernel
 * -# @ref CLGEMMMatrixMultiplyKernel
 * -# @ref CLCol2ImKernel
 */
class CLConvolutionLayer : public IFunction
{
public:
    /** Default constructor */
    CLConvolutionLayer();
    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represents a batch of inputs.
     *                          Data types supported: F16, F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel. If the layer is not a fully connected convolution, the weights
     *                          tensor must also have been transposed with @ref CLGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo());

    // Inherited methods overridden:
    void run() override;

private:
    CLConvolutionLayerReshapeWeights _reshape_weights;
    CLIm2ColKernel _input_im2col_kernel;
    CLGEMMInterleave4x4Kernel _input_interleave_kernel;
    CLGEMMMatrixMultiplyKernel _mm_kernel;
    CLCol2ImKernel _output_col2im_kernel;
    CLTensor _input_im2col_reshaped;
    CLTensor _input_interleaved_reshaped;
    CLTensor _weights_reshaped;
    CLTensor _weights_transposed;
    CLTensor _gemm_output;
    bool _has_bias;
    bool _is_fully_connected_convolution;
    bool _are_weights_reshaped;
};
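
// A minimal usage sketch for CLConvolutionLayer (illustrative only: the tensor objects
// src, weights, biases and dst are hypothetical names, and their shapes, allocation and
// the OpenCL runtime initialisation are assumed to be set up elsewhere):
//
//     CLConvolutionLayer conv;
//     // Stride 1 in x and y, 1-pixel padding; weights are passed in their original
//     // [kernel_x, kernel_y, IFM, OFM] layout, so the default WeightsInfo() is used and
//     // the function reshapes/transposes them internally on the first run.
//     conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
//     conv.run();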
}
#endif /* __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__ */