/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H
#define ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"

#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
// Forward declarations
class ITensor;
class ICPPKernel;

/** Basic function to simulate a convolution layer. This function calls the following kernels:
 *
 * -# @ref NEWinogradLayerTransformWeightsKernel (executed only once, in the first call to the run() method)
 * -# @ref NEWinogradLayerTransformInputKernel
 * -# @ref NEWinogradLayerTransformOutputKernel
 * -# @ref cpu::CpuGemmAssemblyDispatch
 * -# @ref CPPPermute (three times: weights, input and output)
 *
 * @note Some Winograd configurations (i.e. F(2x2, 5x5), F(4x4, 5x5)) are supported only with enable_fast_math = true
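 *
 * A minimal usage sketch (illustrative only; the tensor names and shapes are assumptions for the example,
 * not part of this interface). It assumes src, weights, bias and dst are F32 tensors initialised with
 * shapes accepted by configure(), e.g. a 3x3 kernel with unit strides:
 * @code
 * NEWinogradConvolutionLayer conv;
 * conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1));
 * conv.run(); // the weights transform is executed only once, on the first call
 * @endcode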
 */
class NEWinogradConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEWinogradConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager = nullptr);
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEWinogradConvolutionLayer(NEWinogradConvolutionLayer &&) = delete;
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEWinogradConvolutionLayer &operator=(NEWinogradConvolutionLayer &&) = delete;
    /** Default destructor */
    ~NEWinogradConvolutionLayer() = default;

    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0           |src1           |src2   |dst            |
     * |:--------------|:--------------|:------|:--------------|
     * |F16            |F16            |F16    |F16            |
     * |F32            |F32            |F32    |F32            |
     *
     * @param[in]  input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: F16/F32.
     * @param[in]  weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                              Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  biases           Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output           Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                              available, which may also introduce a drop in accuracy. Default is false
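     *
     * A sketch of how the tensors above could be prepared (illustrative only; the shapes and names are
     * assumptions for the example, not requirements of this function):
     * @code
     * Tensor src, weights, bias, dst;
     * src.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));      // [width, height, IFM]
     * weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 64U), 1, DataType::F32)); // [kernel_x, kernel_y, IFM, OFM]
     * bias.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));                // [OFM]
     * dst.allocator()->init(TensorInfo(TensorShape(224U, 224U, 64U), 1, DataType::F32));     // [width, height, OFM] for a 3x3 kernel, stride 1, pad 1
     * @endcode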
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo(),
                   bool enable_fast_math = false);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradConvolutionLayer
     *
     * @param[in] input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represents a batch of inputs.
     *                             Data types supported: F16/F32.
     * @param[in] weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                             Currently only 3x3 and 5x5 kernels are supported.
     * @param[in] biases           Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output           Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in] act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in] enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                             available, which may also introduce a drop in accuracy. Default is false
     *
     * @return a status
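     *
     * Example of a pre-flight check (illustrative only; the ITensorInfo objects are assumed to describe
     * tensors shaped as in the configure() documentation above):
     * @code
     * Status s = NEWinogradConvolutionLayer::validate(&src_info, &weights_info, &bias_info, &dst_info,
     *                                                 PadStrideInfo(1, 1, 1, 1));
     * if(bool(s))
     * {
     *     // configuration is supported; safe to call configure() with matching tensors
     * }
     * @endcode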
115 */
Vidhya Sudhan Loganathan3ca97862018-04-23 08:20:04 +0100116 static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
Giorgio Arenaa3221e62018-05-03 15:57:48 +0100117 const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
Isabella Gottardi6acc6ad2018-02-02 17:19:18 +0000118
Pablo Tello89519332017-11-17 11:52:36 +0000119 /** Prevent instances of this class from being copied (As this class contains pointers) */
Georgios Pinitas9fb11592018-04-26 20:34:58 +0100120 NEWinogradConvolutionLayer(const NEWinogradConvolutionLayer &) = delete;
Pablo Tello89519332017-11-17 11:52:36 +0000121 /** Prevent instances of this class from being copied (As this class contains pointers) */
Georgios Pinitas9fb11592018-04-26 20:34:58 +0100122 NEWinogradConvolutionLayer &operator=(const NEWinogradConvolutionLayer &) = delete;
Pablo Tello89519332017-11-17 11:52:36 +0000123
124private:
Michalis Spyrouebcebf12020-10-21 00:04:14 +0100125 MemoryGroup _memory_group;
126 NEGEMM _gemm_function;
127 std::unique_ptr<ICPPKernel> _transform_input_kernel;
128 std::unique_ptr<ICPPKernel> _transform_output_kernel;
129 std::unique_ptr<ICPPKernel> _transform_weights_kernel;
130 NEActivationLayer _activationlayer_function;
Pablo Tellof6c572c2018-02-14 12:47:30 +0000131
Pablo Tello52140b42018-01-30 14:48:11 +0000132 CPPPermute _permute_input;
133 CPPPermute _permute_weights;
134 CPPPermute _permute_output;
Pablo Tello8f43d742019-03-27 09:28:32 +0000135 Tensor _input_transformed;
136 Tensor _output_transformed;
Pablo Tello52140b42018-01-30 14:48:11 +0000137 Tensor _input_workspace;
138 Tensor _output_workspace;
139 Tensor _kernel_storage;
140 Tensor _input_nhwc;
141 Tensor _output_nhwc;
142 Tensor _weights_hwio;
143 const ITensor *_input;
144 const ITensor *_weights;
145 ITensor *_output;
Georgios Pinitas72219332018-06-05 14:56:06 +0100146 bool _is_prepared;
Isabella Gottardi3f217ec2018-02-12 14:59:19 +0000147 bool _is_activationlayer_enabled;
Manuel Bottinica62c6f2021-03-23 11:50:34 +0000148 DataLayout _data_layout;
Pablo Tello89519332017-11-17 11:52:36 +0000149};
Georgios Pinitas5ce897f2020-04-29 11:44:10 +0100150} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000151#endif /* ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H */