/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;
/** Basic function to simulate a convolution layer. This function calls the following NEON kernels:
 * -# @ref NEWinogradLayerTransformWeightsKernel (executed only once in the first call to the run() method)
 * -# @ref NEWinogradLayerTransformInputKernel
 * -# @ref NEWinogradLayerTransformOutputKernel
 * -# @ref NEWinogradLayerBatchedGEMMKernel
 * -# @ref CPPPermute (three times: weights, input and output)
 *
 * @note Some Winograd configurations (i.e. F(2x2, 5x5), F(4x4, 5x5)) are supported only with enable_fast_math = true
 */
class NEWinogradConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);

    /** Set the input and output tensors.
     *
     * @param[in]  input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: F32.
     * @param[in]  weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                              Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  biases           Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. When this flag is set, the function may dispatch the fastest implementation
     *                              available, which can introduce a drop in accuracy. Default is false.
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo(),
                   bool enable_fast_math = false);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradConvolutionLayer
     *
     * @param[in]  input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: F32.
     * @param[in]  weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                              Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  biases           Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in]  output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. When this flag is set, the function may dispatch the fastest implementation
     *                              available, which can introduce a drop in accuracy. Default is false.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
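
    /* Validation sketch (illustrative, not part of the original documentation): validate() only needs
     * ITensorInfo objects, so it can be called before any backing memory is allocated to check whether
     * a given Winograd configuration is supported. The info objects below are assumptions matching the
     * configure() sketch above.
     *
     * @code
     * Status status = NEWinogradConvolutionLayer::validate(&src_info, &weights_info, &biases_info, &dst_info,
     *                                                      PadStrideInfo(1, 1, 1, 1));
     * if(status.error_code() != ErrorCode::OK)
     * {
     *     // Configuration not supported: fall back to another convolution method.
     * }
     * @endcode
     */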

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradConvolutionLayer(const NEWinogradConvolutionLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradConvolutionLayer &operator=(const NEWinogradConvolutionLayer &) = delete;

private:
    MemoryGroup                                          _memory_group;
    std::unique_ptr<arm_gemm::GemmCommon<float, float>>  _arm_gemm;
    std::unique_ptr<INEKernel>                           _gemm_kernel;
    std::unique_ptr<INEKernel>                           _transform_input_kernel;
    std::unique_ptr<INEKernel>                           _transform_output_kernel;
    std::unique_ptr<INEKernel>                           _transform_weights_kernel;
    NEActivationLayer                                    _activationlayer_function;

    CPPPermute     _permute_input;
    CPPPermute     _permute_weights;
    CPPPermute     _permute_output;
    Tensor         _input_workspace;
    Tensor         _output_workspace;
    Tensor         _kernel_storage;
    Tensor         _input_nhwc;
    Tensor         _output_nhwc;
    Tensor         _weights_hwio;
    Tensor         _workspace;
    const ITensor *_input;
    const ITensor *_weights;
    ITensor       *_output;
    bool           _is_prepared;
    bool           _is_activationlayer_enabled;
};
}
#endif /* __ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H__ */