blob: 27b1e842018ecbee19fe1b3c92aff4a6dc3dbe14 [file] [log] [blame]
/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEWINOGRADLAYER_H__
#define __ARM_COMPUTE_NEWINOGRADLAYER_H__

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;
/** Basic function to simulate a convolution layer. This function calls the following NEON kernels:
 * -# @ref NEWinogradLayerTransformWeightsKernel (executed only once in the first call to the run() method )
 * -# @ref NEWinogradLayerTransformInputKernel
 * -# @ref NEWinogradLayerTransformOutputKernel
 * -# @ref NEWinogradLayerBatchedGEMMKernel
 * -# @ref CPPPermute (three times: weights, input and output)
 */
class NEWinogradLayer : public IFunction
{
public:
    /** Constructor
     *
     * @param[in] memory_manager (Optional) Memory manager to be used by the function for the
     *                           lifetime management of its internal workspace tensors.
     */
    NEWinogradLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);

    /** Set the input and output tensors.
     *
     * @param[in]  input     Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                       while every optional dimension from 4 and above represent a batch of inputs.
     *                       Data types supported: F32.
     * @param[in]  weights   Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     *                       Currently only 3x3 and 5x5 kernels are supported.
     * @param[in]  biases    Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output    Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                       Data types supported: Same as @p input.
     * @param[in]  conv_info Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in]  act_info  (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

    /** Static function to check if given info will lead to a valid configuration of @ref NEWinogradLayer
     *
     * @param[in] input     Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                      while every optional dimension from 4 and above represent a batch of inputs.
     *                      Data types supported: F32.
     * @param[in] weights   Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
     *                      Currently only 3x3 and 5x5 kernels are supported.
     * @param[in] biases    Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output    Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                      Data types supported: Same as @p input.
     * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. Currently only unit strides are supported.
     * @param[in] act_info  (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo());

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayer(const NEWinogradLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEWinogradLayer &operator=(const NEWinogradLayer &) = delete;

private:
    MemoryGroup                _memory_group;             // Groups the workspace tensors below for memory-manager reuse
    std::unique_ptr<INEKernel> _batched_gemm_kernel;      // Runs the batched GEMM stage (see class-level kernel list)
    std::unique_ptr<INEKernel> _transform_input_kernel;   // Winograd input transform stage
    std::unique_ptr<INEKernel> _transform_output_kernel;  // Winograd output transform stage
    std::unique_ptr<INEKernel> _transform_weights_kernel; // Winograd weights transform; per class docs, run once on first run()
    NEActivationLayer          _activationlayer_function; // Fused activation, executed only when _is_activationlayer_enabled

    // Three permutes, one each for weights, input and output (see class-level kernel list).
    CPPPermute _permute_input;
    CPPPermute _permute_weights;
    CPPPermute _permute_output;
    // Intermediate tensors. NOTE(review): the NHWC/HWIO layout implied by the names of
    // _input_nhwc/_output_nhwc/_weights_hwio is not established by code in this header —
    // confirm against the .cpp implementation.
    Tensor _input_workspace;
    Tensor _output_workspace;
    Tensor _kernel_storage;
    Tensor _input_nhwc;
    Tensor _output_nhwc;
    Tensor _weights_hwio;
    // Borrowed tensor pointers set by configure(); not owned by this function.
    const ITensor *_input;
    const ITensor *_weights;
    ITensor       *_output;
    bool           _reshaped_kernel;             // Presumably tracks whether the one-off weights transform has run — verify in run()
    bool           _is_activationlayer_enabled;  // True when act_info requests a fused activation
};
}
#endif /* __ARM_COMPUTE_NEWINOGRADLAYER_H__ */