/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H__
#define __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H__

#include "arm_compute/core/CL/ICLKernel.h"

#include "arm_compute/core/KernelDescriptors.h"

namespace arm_compute
{
class ICLTensor;

/** Interface for the kernel to run an MxN depthwise convolution, where M and N are respectively the number of rows and columns of the filter.
 *  This kernel assumes that the weights tensor is NOT reshaped (native version). */
class CLDepthwiseConvolutionLayerNativeKernel : public ICLKernel
{
public:
    /** Default constructor */
    CLDepthwiseConvolutionLayerNativeKernel();
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    CLDepthwiseConvolutionLayerNativeKernel(const CLDepthwiseConvolutionLayerNativeKernel &) = delete;
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    CLDepthwiseConvolutionLayerNativeKernel &operator=(const CLDepthwiseConvolutionLayerNativeKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLDepthwiseConvolutionLayerNativeKernel(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLDepthwiseConvolutionLayerNativeKernel &operator=(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
    /** Initialize the function's source, destination and parameters
     *
     * @param[in]  input            Source tensor. Data type supported: FP32/FP16. Data layout supported: NHWC
     * @param[in]  weights          Weights tensor. A 3D tensor with dimensions [IFM, N, M]. Data type supported: Same as @p input.
     * @param[in]  biases           Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
     *                              Data type supported: Same as @p input.
     * @param[out] output           Destination tensor. Data type supported: Same as @p input.
     * @param[in]  dwc_weights_info Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
     * @param[in]  dwc_info         Depthwise convolution layer info
     * @param[in]  conv_info        Padding and stride information to use for the convolution.
     * @param[in]  depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
     * @param[in]  dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info, const DWCKernelInfo &dwc_info,
                   const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U));
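    /* A minimal configuration sketch (illustrative, not part of the original header). The tensor objects
     * and the default-constructed DWCWeightsKernelInfo / DWCKernelInfo values are assumptions; in practice
     * they are filled in by the calling function for the target GPU and workload.
     *
     *   CLTensor input, weights, biases, output;     // assumed NHWC CL tensors, already initialised
     *   DWCWeightsKernelInfo dwc_weights_info{};     // e.g. number of output elements processed per thread
     *   DWCKernelInfo        dwc_info{};             // e.g. fused activation information
     *   PadStrideInfo        conv_info(1, 1, 1, 1);  // stride (1, 1), padding (1, 1)
     *
     *   CLDepthwiseConvolutionLayerNativeKernel dwc_kernel;
     *   dwc_kernel.configure(&input, &weights, &biases, &output, dwc_weights_info, dwc_info, conv_info);
     */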
    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
     *
     * @param[in] input            Source tensor info. Data type supported: FP32/FP16. Data layout supported: NHWC
     * @param[in] weights          Weights tensor info. A 3D tensor with dimensions [IFM, N, M]. Data type supported: Same as @p input.
     * @param[in] biases           Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
     *                             Data type supported: Same as @p input.
     * @param[in] output           Destination tensor info. Data type supported: Same as @p input.
     * @param[in] dwc_weights_info Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
     * @param[in] dwc_info         Depthwise convolution layer info
     * @param[in] conv_info        Padding and stride information to use for the convolution.
     * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
     * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCWeightsKernelInfo &dwc_weights_info,
                           const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U));
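    /* Illustrative pre-flight check (hypothetical tensor infos, same descriptors as in the configure() sketch above):
     *
     *   const Status status = CLDepthwiseConvolutionLayerNativeKernel::validate(input.info(), weights.info(), biases.info(), output.info(),
     *                                                                           dwc_weights_info, dwc_info, conv_info);
     *   ARM_COMPUTE_ERROR_THROW_ON(status);
     */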

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;
    const ICLTensor *_weights;
    const ICLTensor *_biases;
    ICLTensor       *_output;
    unsigned int     _depth_multiplier;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H__ */