/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NELOCALLYCONNECTEDLAYER_H__
#define __ARM_COMPUTE_NELOCALLYCONNECTEDLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/kernels/NECol2ImKernel.h"
#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h"
#include "arm_compute/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class INETensor;

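// For reference, a locally connected layer computes the same sliding-window sum as a convolution,
// except that every output position uses its own set of weights (nothing is shared spatially).
// A naive scalar sketch of the computation follows; the indexing helpers in(), w(), bias() and out()
// and the row-major patch ordering are illustrative assumptions, and zero padding is omitted:
//
//   for(int p = 0, oy = 0; oy < out_h; ++oy)
//   {
//       for(int ox = 0; ox < out_w; ++ox, ++p) // p = patch index, num_patches = out_w * out_h
//       {
//           for(int o = 0; o < ofm; ++o)
//           {
//               float acc = bias(o, p);
//               for(int i = 0; i < ifm; ++i)
//               {
//                   for(int ky = 0; ky < kernel_h; ++ky)
//                   {
//                       for(int kx = 0; kx < kernel_w; ++kx)
//                       {
//                           acc += in(ox * stride_x + kx, oy * stride_y + ky, i) * w(kx, ky, i, o, p);
//                       }
//                   }
//               }
//               out(ox, oy, o) = acc;
//           }
//       }
//   }
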
/** Basic function to compute the locally connected layer. This function calls the following NEON kernels:
 *
 * -# @ref NEWeightsReshapeKernel (executed only once for each configuration)
 * -# @ref NEIm2ColKernel
 * -# @ref NELocallyConnectedMatrixMultiplyKernel
 * -# @ref NECol2ImKernel
 */
class NELocallyConnectedLayer : public IFunction
{
public:
    /** Constructor
     *
     * @param[in] memory_manager (Optional) Memory manager to be used by the function.
     */
    NELocallyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Set the input and output tensors.
     *
     * @param[in]  input     Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                       while every optional dimension from 4 and above represents a batch of inputs.
     *                       Data types supported: F16, F32.
     * @param[in]  weights   Weights tensor. Weights are a 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches]. Data type supported: Same as @p input.
     * @param[in]  biases    Biases tensor. Shared biases supported. Biases are a 2D tensor with dimensions [OFM, num_patches]. Data type supported: Same as @p input.
     * @param[out] output    Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                       Data types supported: Same as @p input.
     * @param[in]  conv_info Contains padding and stride information described in @ref PadStrideInfo.
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info);
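    // A minimal configuration sketch (illustrative only; the tensor names and shapes below are
    // hypothetical, not part of this API). For a 23x27 input with 5 channels, a 3x3 kernel,
    // stride 2 and no padding, the output is 11x13, so num_patches = 11 * 13 = 143:
    //
    //   Tensor src, weights, biases, dst;
    //   src.allocator()->init(TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32));
    //   weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 5U, 21U, 143U), 1, DataType::F32));
    //   biases.allocator()->init(TensorInfo(TensorShape(21U, 143U), 1, DataType::F32));
    //   dst.allocator()->init(TensorInfo(TensorShape(11U, 13U, 21U), 1, DataType::F32));
    //
    //   NELocallyConnectedLayer lc;
    //   lc.configure(&src, &weights, &biases, &dst, PadStrideInfo(2, 2, 0, 0));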
    /** Static function to check if given info will lead to a valid configuration of @ref NELocallyConnectedLayer
     *
     * @param[in] input     Input tensor info. 3 lower dimensions represent a single input [width, height, IFM],
     *                      while every optional dimension from 4 and above represents a batch of inputs.
     *                      Data types supported: F16, F32.
     * @param[in] weights   Weights tensor info. Weights are a 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches]. Data type supported: Same as @p input.
     * @param[in] biases    Biases tensor info. Shared biases supported. Biases are a 2D tensor with dimensions [OFM, num_patches]. Data type supported: Same as @p input.
     * @param[in] output    Output tensor info. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                      Data types supported: Same as @p input.
     * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info);
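    // A hedged sketch of the validate-before-configure pattern; the info objects and shapes are
    // illustrative assumptions mirroring the configure() example above:
    //
    //   const TensorInfo src_info(TensorShape(23U, 27U, 5U), 1, DataType::F32);
    //   const TensorInfo weights_info(TensorShape(3U, 3U, 5U, 21U, 143U), 1, DataType::F32);
    //   const TensorInfo biases_info(TensorShape(21U, 143U), 1, DataType::F32);
    //   const TensorInfo dst_info(TensorShape(11U, 13U, 21U), 1, DataType::F32);
    //
    //   const Status status = NELocallyConnectedLayer::validate(&src_info, &weights_info, &biases_info,
    //                                                           &dst_info, PadStrideInfo(2, 2, 0, 0));
    //   if(!bool(status))
    //   {
    //       // The configuration would be invalid; status.error_description() explains why.
    //   }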

    // Inherited methods overridden:
    void run() override;
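    // Typical run sequence, sketched under the same hypothetical setup as the configure() example
    // above: allocate the tensors after configure(), fill them, then call run() once per execution.
    // As noted in the class documentation, the weights reshape kernel only executes once per
    // configuration (on the first run):
    //
    //   src.allocator()->allocate();
    //   weights.allocator()->allocate();
    //   biases.allocator()->allocate();
    //   dst.allocator()->allocate();
    //   // ... fill src, weights and biases ...
    //   lc.run();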

private:
    MemoryGroup                            _memory_group;
    NEIm2ColKernel                         _input_im2col_kernel;
    NEWeightsReshapeKernel                 _weights_reshape_kernel;
    NELocallyConnectedMatrixMultiplyKernel _mm_kernel;
    NECol2ImKernel                         _output_col2im_kernel;
    Tensor                                 _input_im2col_reshaped;
    Tensor                                 _weights_reshaped;
    Tensor                                 _gemm_output;
    bool                                   _is_first_run;
};
}
#endif /* __ARM_COMPUTE_NELOCALLYCONNECTEDLAYER_H__ */