/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H
#define ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"

#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
// Forward declarations
class ITensor;

/** Basic function to simulate a convolution layer. This function calls the following NEON kernels:
 * -# @ref NEWinogradLayerTransformWeightsKernel (executed only once in the first call to the run() method)
 * -# @ref NEWinogradLayerTransformInputKernel
 * -# @ref NEWinogradLayerTransformOutputKernel
 * -# @ref NEGEMMAssemblyDispatch
 * -# @ref CPPPermute (three times: weights, input and output)
 *
 * @note Some Winograd configurations (i.e. F(2x2, 5x5), F(4x4, 5x5)) are supported only with enable_fast_math = true
 */
class NEWinogradConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEWinogradConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager = nullptr);

    /** Set the input and output tensors.
     *
     * @param[in]  input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: F16/F32.
     * @param[in]  weights          Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                              Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  biases           Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output           Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                              available, which may also introduce a drop in accuracy. Default is false.
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo(),
                   bool enable_fast_math = false);
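    /* A minimal usage sketch (illustrative only; the tensor names, the 3x3/unit-stride
     * configuration and the data type below are assumptions, not taken from the library's
     * own examples):
     *
     * @code
     * Tensor src, weights, biases, dst;
     * // ... initialise the tensors' TensorInfo (e.g. F32) and allocate their backing memory ...
     * NEWinogradConvolutionLayer winograd_conv;
     * winograd_conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
     * winograd_conv.run(); // the first call also transforms the weights (see prepare())
     * @endcode
     */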

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradConvolutionLayer
     *
     * @param[in]  input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: F16/F32.
     * @param[in]  weights          Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                              Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  biases           Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in]  output           Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                              available, which may also introduce a drop in accuracy. Default is false.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
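    /* A sketch of validating a candidate configuration before calling configure(). The concrete
     * shapes and data type here are illustrative assumptions:
     *
     * @code
     * const TensorInfo src_info(TensorShape(56U, 56U, 64U), 1, DataType::F32);
     * const TensorInfo wei_info(TensorShape(3U, 3U, 64U, 128U), 1, DataType::F32);
     * const TensorInfo bia_info(TensorShape(128U), 1, DataType::F32);
     * const TensorInfo dst_info(TensorShape(56U, 56U, 128U), 1, DataType::F32);
     * const Status     status = NEWinogradConvolutionLayer::validate(&src_info, &wei_info, &bia_info, &dst_info,
     *                                                                PadStrideInfo(1, 1, 1, 1));
     * if(status.error_code() != ErrorCode::OK)
     * {
     *     // fall back to another convolution method, e.g. NEGEMMConvolutionLayer
     * }
     * @endcode
     */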

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradConvolutionLayer(const NEWinogradConvolutionLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradConvolutionLayer &operator=(const NEWinogradConvolutionLayer &) = delete;

private:
    MemoryGroup                _memory_group;
    NEGEMM                     _gemm_function;
    std::unique_ptr<INEKernel> _transform_input_kernel;
    std::unique_ptr<INEKernel> _transform_output_kernel;
    std::unique_ptr<INEKernel> _transform_weights_kernel;
    NEActivationLayer          _activationlayer_function;

    CPPPermute     _permute_input;
    CPPPermute     _permute_weights;
    CPPPermute     _permute_output;
    Tensor         _input_transformed;
    Tensor         _output_transformed;
    Tensor         _input_workspace;
    Tensor         _output_workspace;
    Tensor         _kernel_storage;
    Tensor         _input_nhwc;
    Tensor         _output_nhwc;
    Tensor         _weights_hwio;
    const ITensor *_input;
    const ITensor *_weights;
    ITensor       *_output;
    bool           _is_prepared;
    bool           _is_activationlayer_enabled;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H */