/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NESPACETOBATCHLAYER_H
#define ARM_COMPUTE_NESPACETOBATCHLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/NEON/kernels/NEMemsetKernel.h"
#include "arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h"
#include "arm_compute/core/Types.h"

namespace arm_compute
{
class ITensor;

/** Basic function to spatially divide a tensor. This function calls the following NEON kernels/functions:
 *
 * -# @ref NEMemsetKernel
 * -# @ref NESpaceToBatchLayerKernel
 */
class NESpaceToBatchLayer : public IFunction
{
public:
    /** Default constructor */
    NESpaceToBatchLayer();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NESpaceToBatchLayer(const NESpaceToBatchLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NESpaceToBatchLayer &operator=(const NESpaceToBatchLayer &) = delete;
    /** Allow instances of this class to be moved */
    NESpaceToBatchLayer(NESpaceToBatchLayer &&) = default;
    /** Allow instances of this class to be moved */
    NESpaceToBatchLayer &operator=(NESpaceToBatchLayer &&) = default;
    /** Default destructor */
    virtual ~NESpaceToBatchLayer() = default;
    /** Set the input and output tensors.
     *
     * @param[in]  input       Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
     * @param[in]  block_shape 1-D tensor with shape [M]. Data types supported: S32
     * @param[in]  paddings    2-D tensor with shape [2, M]. Data types supported: S32
     * @param[out] output      Tensor output. Data types supported: same as @p input
     */
    void configure(const ITensor *input, const ITensor *block_shape, const ITensor *paddings, ITensor *output);
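    // A minimal usage sketch for this overload (not part of the original header): it shows one
    // way to drive the layer with runtime block-shape and paddings tensors. The shapes, data type
    // and the assumption of a 2x2 block with zero paddings are illustrative only.
    //
    //   #include "arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h"
    //   #include "arm_compute/runtime/Tensor.h"
    //
    //   using namespace arm_compute;
    //
    //   Tensor input, block_shape, paddings, output;
    //   input.allocator()->init(TensorInfo(TensorShape(4U, 4U, 1U, 1U), 1, DataType::F32));
    //   block_shape.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::S32));
    //   paddings.allocator()->init(TensorInfo(TensorShape(2U, 2U), 1, DataType::S32));
    //   // Output shape assumes block_shape = [2, 2] and zero paddings: 4x4x1x1 -> 2x2x1x4
    //   output.allocator()->init(TensorInfo(TensorShape(2U, 2U, 1U, 4U), 1, DataType::F32));
    //
    //   NESpaceToBatchLayer space_to_batch;
    //   space_to_batch.configure(&input, &block_shape, &paddings, &output);
    //
    //   input.allocator()->allocate();
    //   block_shape.allocator()->allocate();
    //   paddings.allocator()->allocate();
    //   output.allocator()->allocate();
    //   // ... fill input, block_shape and paddings with valid values ...
    //   space_to_batch.run();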
    /** Set the input and output tensors. (Static block shape and paddings)
     *
     * @param[in]  input         Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
     * @param[in]  block_shape_x Block shape x value.
     * @param[in]  block_shape_y Block shape y value.
     * @param[in]  padding_left  The left padding of the output tensor.
     * @param[in]  padding_right The right padding of the output tensor.
     * @param[out] output        Tensor output. Data types supported: same as @p input
     */
    void configure(const ITensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, ITensor *output);
    /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToBatchLayer
     *
     * @param[in] input       Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
     * @param[in] block_shape block shape tensor info with shape [M]. Data types supported: S32
     * @param[in] paddings    paddings tensor info with shape [2, M]. Data types supported: S32
     * @param[in] output      Tensor output info. Data types supported: same as @p input
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output);
    /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToBatchLayer (Static block shape and paddings)
     *
     * @param[in] input         Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
     * @param[in] block_shape_x Block shape x value.
     * @param[in] block_shape_y Block shape y value.
     * @param[in] padding_left  The left padding of the output tensor.
     * @param[in] padding_right The right padding of the output tensor.
     * @param[in] output        Tensor output info. Data types supported: same as @p input
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, const ITensorInfo *output);
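    // A hedged sketch of the validate-then-configure pattern with the static overloads. The block
    // shape of 2, the 1x1 paddings and the pre-initialised input/output tensors are illustrative
    // assumptions, not values mandated by this header.
    //
    //   const Status status = NESpaceToBatchLayer::validate(input.info(), 2, 2,
    //                                                       Size2D(1, 1), Size2D(1, 1),
    //                                                       output.info());
    //   if(bool(status))
    //   {
    //       NESpaceToBatchLayer space_to_batch;
    //       space_to_batch.configure(&input, 2, 2, Size2D(1, 1), Size2D(1, 1), &output);
    //       space_to_batch.run();
    //   }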

    // Inherited methods overridden:
    void run() override;

private:
    NESpaceToBatchLayerKernel _space_to_batch_kernel; /**< SpaceToBatch kernel to run */
    NEMemsetKernel            _memset_kernel;         /**< Memset kernel to run */
    bool                      _has_padding;           /**< Flag to check if the output has padding */
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NESPACETOBATCHLAYER_H */