blob: 1ddbacf327b7d5c9423e437ce7709a580613395a [file] [log] [blame]
/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"

#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;
/** Basic function to simulate a convolution layer. This function calls the following NEON kernels:
 * -# @ref NEWinogradLayerTransformWeightsKernel (executed only once in the first call to the run() method )
 * -# @ref NEWinogradLayerTransformInputKernel
 * -# @ref NEWinogradLayerTransformOutputKernel
 * -# @ref NEGEMMAssemblyDispatch
 * -# @ref CPPPermute (three times: weights, input and output)
 *
 * @note Some Winograd configurations (i.e. F(2x2, 5x5), F(4x4, 5x5)) are supported only with enable_fast_math = true
 */
class NEWinogradConvolutionLayer : public IFunction
{
public:
    /** Constructor
     *
     * @param[in] memory_manager (Optional) Memory manager to lifetime-manage the function's internal tensors. Default is nullptr (no memory manager).
     */
    NEWinogradConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager = nullptr);

    /** Set the input and output tensors.
     *
     * @param[in]  input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represent a batch of inputs.
     *                              Data types supported: F32.
     * @param[in]  weights          Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                              Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  biases           Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output           Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
     *                              available which may introduce a drop of accuracy as well. Default is false
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo(),
                   bool enable_fast_math = false);

    // Inherited methods overridden:
    /** Run the Winograd convolution: input/weight transforms, GEMM, output transform and (if enabled) the fused activation. */
    void run() override;
    /** Prepare the function for execution, e.g. one-off weight transformation (see class description: the weights transform runs only once). */
    void prepare() override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradConvolutionLayer
     *
     * @param[in] input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represent a batch of inputs.
     *                             Data types supported: F32.
     * @param[in] weights          Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
     *                             Currently only 3x3 and 5x5 kernels are supported.
     * @param[in] biases           Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output           Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in] act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation
     *                             available which may introduce a drop of accuracy as well. Default is false
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradConvolutionLayer(const NEWinogradConvolutionLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradConvolutionLayer &operator=(const NEWinogradConvolutionLayer &) = delete;

private:
    MemoryGroup                _memory_group;              /**< Memory group for the internal working tensors. */
    NEGEMM                     _gemm_function;             /**< GEMM executed on the transformed input/weights. */
    std::unique_ptr<INEKernel> _transform_input_kernel;    /**< Winograd input transform kernel (type selected at configure time). */
    std::unique_ptr<INEKernel> _transform_output_kernel;   /**< Winograd output transform kernel. */
    std::unique_ptr<INEKernel> _transform_weights_kernel;  /**< Winograd weights transform kernel (run once, see prepare()). */
    NEActivationLayer          _activationlayer_function;  /**< Optional fused activation (see _is_activationlayer_enabled). */

    CPPPermute     _permute_input;       /**< Permutes the input (one of the three CPPPermute uses listed in the class description). */
    CPPPermute     _permute_weights;     /**< Permutes the weights. */
    CPPPermute     _permute_output;      /**< Permutes the output. */
    Tensor         _input_transformed;   /**< Winograd-transformed input. */
    Tensor         _output_transformed;  /**< GEMM output prior to the output transform. */
    Tensor         _input_workspace;     /**< Scratch buffer for the input transform. */
    Tensor         _output_workspace;    /**< Scratch buffer for the output transform. */
    Tensor         _kernel_storage;      /**< Storage for the transformed weights. */
    Tensor         _input_nhwc;          /**< Input permuted to NHWC layout (name-based; see _permute_input). */
    Tensor         _output_nhwc;         /**< Output in NHWC layout before the final permute (name-based; see _permute_output). */
    Tensor         _weights_hwio;        /**< Weights permuted to HWIO layout (name-based; see _permute_weights). */
    const ITensor *_input;               /**< Borrowed source tensor (not owned). */
    const ITensor *_weights;             /**< Borrowed weights tensor (not owned). */
    ITensor       *_output;              /**< Borrowed destination tensor (not owned). */
    bool           _is_prepared;         /**< True once prepare() has run (one-off weight transform done). */
    bool           _is_activationlayer_enabled; /**< True when a fused activation was requested via act_info. */
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__ */