blob: 56ce27457266ea97342df3be0c61b464ede307f8 [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Ioan-Cristian Szabob4e3e1c2017-11-30 17:17:17 +00002 * Copyright (c) 2017-2018 ARM Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H__
25#define __ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H__
26
#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h"
#include "arm_compute/core/NEON/kernels/NETransposeKernel.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
#include "arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>
38
39namespace arm_compute
40{
/** Basic function to reshape the weights of Fully Connected layer with NEON. This function calls the following kernels:
 *
 * -# @ref NETransposeKernel
 *
 * @note The fully connected layer accepts "weights" tensors only with 2 dimensions.
 */
class NEFullyConnectedLayerReshapeWeights : public INESimpleFunctionNoBorder
{
public:
    /** Set the input and output tensors.
     *
     * @param[in]  input  Weights tensor. The weights must be 2 dimensional. Data types supported: QASYMM8/F16/F32.
     * @param[out] output Destination tensor. Data type supported: Same as @p input.
     */
    void configure(const ITensor *input, ITensor *output);
    /** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayerReshapeWeights
     *
     * @param[in] input  Weights tensor info. The weights must be 2 dimensional. Data types supported: QASYMM8/F16/F32.
     * @param[in] output Destination tensor info. Data type supported: Same as @p input.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
};
65
/** Basic function to compute a Fully Connected layer on NEON. This function calls the following NEON kernels/functions:
 *  -# @ref NEFlattenLayerKernel (called when the input comes from a convolutional layer)
 *  -# @ref NEConvertFullyConnectedWeights (called once, when the weight layout needs conversion and the weights are not yet converted)
 *  -# @ref NEFullyConnectedLayerReshapeWeights (if @p are_weights_reshaped is set to false and transpose_weights is set to true ) (called once)
 *  -# @ref NEGEMM or @ref NEGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
 *  -# @ref NEGEMMMatrixAccumulateBiasesKernel or @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if quantized asymmetric) (if @p biases is not equal to nullptr)
 *
 * @note The fully connected layer accepts "weights" tensors only with 2 dimensions.
 */
class NEFullyConnectedLayer : public IFunction
{
public:
    /** Constructor
     *
     * @param[in] memory_manager (Optional) Memory manager used to reuse/coordinate the function's temporary tensors.
     */
    NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEFullyConnectedLayer(const NEFullyConnectedLayer &) = delete;
    /** Default move constructor */
    NEFullyConnectedLayer(NEFullyConnectedLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEFullyConnectedLayer &operator=(const NEFullyConnectedLayer &) = delete;
    /** Default move assignment operator */
    NEFullyConnectedLayer &operator=(NEFullyConnectedLayer &&) = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input   Source tensor. Data type supported: QASYMM8/F16/F32.
     * @param[in]  weights Weights tensor. The weights must be 2 dimensional.
     *                     If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
     *                     If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
     *                     Data type supported: Same as @p input.
     * @param[in]  biases  Bias tensor. Can be nullptr. Data type supported: Same as @p input.
     * @param[out] output  Destination tensor. Its shape should be equal to the output of a matrix multiplication between:
     *                     - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
     *                     - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
     *                     Data type supported: Same as @p input.
     * @param[in]  fc_info (Optional) Fully connected layer additional info
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
                   FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayer
     *
     * @param[in]  input   Source tensor info. Data type supported: QASYMM8/F16/F32.
     * @param[in]  weights Weights tensor info. The weights must be 2 dimensional.
     *                     If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
     *                     If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
     *                     Data type supported: Same as @p input.
     * @param[in]  biases  Bias tensor info. Can be nullptr. Data type supported: Same as @p input.
     * @param[out] output  Destination tensor info. Its shape should be equal to the output of a matrix multiplication between:
     *                     - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
     *                     - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
     *                     Data type supported: Same as @p input.
     * @param[in]  fc_info (Optional) Fully connected layer additional info
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                           FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());

    // Inherited methods override
    void run() override;
    void prepare() override;

private:
    // Internal configuration helpers: one per input provenance (FC-after-FC vs FC-after-conv)
    // plus the shared matrix-multiply setup. Bodies live in the corresponding .cpp file.
    void configure_fc_fc(const ITensor *input, const ITensor *weights, ITensor *output);
    void configure_conv_fc(const ITensor *input, const ITensor *weights, ITensor *output);
    void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output);

    MemoryGroup                                         _memory_group;              // Groups the temporary tensors below for memory reuse
    NEFlattenLayerKernel                                _flatten_kernel;            // Flattens 3D conv output into a 2D GEMM input
    NEConvertFullyConnectedWeights                      _convert_weights;           // Converts weights between data layouts
    NEFullyConnectedLayerReshapeWeights                 _reshape_weights_function;  // Transposes the 2D weights
    NEGEMM                                              _mm_gemm;                   // Matrix multiply, float path
    NEGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;               // Matrix multiply, quantized path
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;     // Requantizes int32 accumulators (quantized path)
    NEGEMMMatrixAccumulateBiasesKernel                  _accumulate_biases_kernel;  // Adds biases (float path)
    Tensor                                              _flatten_output;
    Tensor                                              _gemmlowp_output;
    Tensor                                              _converted_weights_output;
    Tensor                                              _reshape_weights_output;
    const ITensor                                      *_original_weights;          // Non-owning; caller-provided weights kept for prepare()
    bool                                                _are_weights_converted;
    bool                                                _are_weights_reshaped;
    bool                                                _is_fc_after_conv;
    bool                                                _accumulate_biases;
    bool                                                _is_quantized;
    bool                                                _is_prepared;
};
Georgios Pinitas1562be32018-03-08 19:09:19 +0000151} // namespace arm_compute
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100152#endif /* __ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H__ */