/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NE_STRIDED_SLICE_KERNEL_H__
#define __ARM_COMPUTE_NE_STRIDED_SLICE_KERNEL_H__

#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/Types.h"

#include <cstdint>

namespace arm_compute
{
// Forward declarations
class ITensor;

/** Interface for the kernel to perform tensor strided slicing */
class NEStridedSliceKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEStridedSliceKernel";
    }
    /** Default constructor */
    NEStridedSliceKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEStridedSliceKernel(const NEStridedSliceKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEStridedSliceKernel &operator=(const NEStridedSliceKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEStridedSliceKernel(NEStridedSliceKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEStridedSliceKernel &operator=(NEStridedSliceKernel &&) = default;
    /** Default destructor */
    ~NEStridedSliceKernel() = default;
    /** Configure kernel
     *
     * @note Supported tensor rank: up to 4
     *
     * @param[in]  input            Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
     * @param[out] output           Destination tensor. Data type supported: Same as @p input
     * @param[in]  starts           The starts of the dimensions of the input tensor to be sliced. The length must be equal to rank(input).
     * @param[in]  ends             The ends of the dimensions of the input tensor to be sliced. The length must be equal to rank(input).
     * @param[in]  strides          The strides of the dimensions of the input tensor to be sliced. The length must be equal to rank(input).
     * @param[in]  begin_mask       If the ith bit of begin_mask is set, starts[i] is ignored and the fullest possible range in that dimension is used instead.
     * @param[in]  end_mask         If the ith bit of end_mask is set, ends[i] is ignored and the fullest possible range in that dimension is used instead.
     * @param[in]  shrink_axis_mask If the ith bit of shrink_axis_mask is set, the ith specification shrinks the dimensionality by 1.
     *                              A slice of size 1 starting from starts[i] in that dimension must be preserved.
     */
    void configure(const ITensor *input, ITensor *output,
                   const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                   int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask);
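
    // A minimal usage sketch (illustrative only; the tensors `src` and `dst`, their 4D shape and
    // the chosen ranges are assumptions, and Coordinates/BiStrides are assumed constructible from
    // a list of per-dimension values). With bits 1-3 of begin_mask and end_mask set (0xE), only
    // the first dimension is restricted, keeping elements [1, 3) and the full range elsewhere:
    //
    //   NEStridedSliceKernel kernel;
    //   kernel.configure(&src, &dst,
    //                    Coordinates(1, 0, 0, 0), // starts: only starts[0] is honoured
    //                    Coordinates(3, 0, 0, 0), // ends (exclusive): only ends[0] is honoured
    //                    BiStrides(1, 1, 1, 1),   // unit stride in every dimension
    //                    0xE,                     // begin_mask: ignore starts[1..3]
    //                    0xE,                     // end_mask: ignore ends[1..3]
    //                    0);                      // shrink_axis_mask: keep all dimensions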

    /** Static function to check if given info will lead to a valid configuration of @ref NEStridedSliceKernel
     *
     * @note Supported tensor rank: up to 4
     *
     * @param[in] input            Source tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
     * @param[in] output           Destination tensor info. Data type supported: Same as @p input
     * @param[in] starts           The starts of the dimensions of the input tensor to be sliced. The length must be equal to rank(input).
     * @param[in] ends             The ends of the dimensions of the input tensor to be sliced. The length must be equal to rank(input).
     * @param[in] strides          The strides of the dimensions of the input tensor to be sliced. The length must be equal to rank(input).
     * @param[in] begin_mask       If the ith bit of begin_mask is set, starts[i] is ignored and the fullest possible range in that dimension is used instead.
     * @param[in] end_mask         If the ith bit of end_mask is set, ends[i] is ignored and the fullest possible range in that dimension is used instead.
     * @param[in] shrink_axis_mask If the ith bit of shrink_axis_mask is set, the ith specification shrinks the dimensionality by 1.
     *                             A slice of size 1 starting from starts[i] in that dimension must be preserved.
     *
     * @return A status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output,
                           const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                           int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask);
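
    // A hedged sketch of the usual validate-before-configure pattern (src, dst and the slice
    // arguments carry over from the example above and are assumptions; Status is assumed to be
    // testable in a boolean context):
    //
    //   Status status = NEStridedSliceKernel::validate(src.info(), dst.info(),
    //                                                  starts, ends, strides,
    //                                                  begin_mask, end_mask, shrink_axis_mask);
    //   if(bool(status))
    //   {
    //       kernel.configure(&src, &dst, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
    //   }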

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    const ITensor *_input;         /**< Source tensor */
    ITensor       *_output;        /**< Destination tensor */
    Coordinates    _starts_abs;    /**< Absolute start coordinates */
    Coordinates    _final_strides; /**< Final strides */
    int32_t        _shrink_mask;   /**< Shrink axis mask */
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NE_STRIDED_SLICE_KERNEL_H__ */