/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEStridedSliceKernel.h"

#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Window.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/helpers/bit_ops.h"
#include "arm_compute/core/utils/helpers/tensor_transform.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
                          const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                          int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1,
                                                         DataType::U8, DataType::S8, DataType::QASYMM8,
                                                         DataType::U16, DataType::S16,
                                                         DataType::U32, DataType::S32,
                                                         DataType::F16, DataType::F32);

    ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().num_dimensions() > 4);
    ARM_COMPUTE_RETURN_ERROR_ON(starts.num_dimensions() > input->num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(ends.num_dimensions() > input->num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(strides.num_dimensions() > input->num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(strides.cbegin(), strides.cbegin() + strides.num_dimensions(), [](int i)
    {
        return i == 0;
    }));

    // Get expected output shape
    const TensorShape exp_output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(*input,
                                                                                                           starts, ends, strides,
                                                                                                           begin_mask, end_mask, shrink_axis_mask);
    ARM_COMPUTE_RETURN_ERROR_ON(exp_output_shape.total_size() == 0);

    // Checks output if configured
    if(output->total_size() != 0)
    {
        const TensorInfo exp_output_info = output->clone()->set_tensor_shape(exp_output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &exp_output_info);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    return Status{};
}

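/** Auto-initializes the output tensor info from the computed strided slice shape (if it is still
 *  empty) and returns a maximal element-wise execution window over the output.
 */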
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output,
                                                         const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                                                         int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    // Output tensor auto initialization if not yet initialized
    const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(*input,
                                                                                                       starts, ends, strides,
                                                                                                       begin_mask, end_mask, shrink_axis_mask);
    auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape));

    // Create window
    const unsigned int num_elems_processed_per_iteration = 1;

    Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
    output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));

    return std::make_pair(Status{}, win);
}

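/** Generic strided slice: for every element of the output window, computes the corresponding
 *  input coordinates as starts[d] + id[axis] * strides[d] and copies a single element.
 *
 *  Dimensions removed by the shrink-axis mask do not appear in the output window; their stride is
 *  zeroed so the input coordinate stays fixed at starts[d]. Worked example: with
 *  shrink_axis_mask = 0x2 (shrink H) the 3D output window maps as
 *    w = starts[0] + id[0] * strides[0]
 *    h = starts[1]                        (fixed, dimension shrunk)
 *    c = starts[2] + id[1] * strides[2]
 *    n = starts[3] + id[2] * strides[3]
 */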
void strided_slice_generic(const ITensor *input, ITensor *output,
                           const Coordinates &starts, const BiStrides &strides, int32_t shrink_axis_mask,
                           const Window &window)
{
    Iterator output_it(output, window);
    const size_t width_size = input->info()->element_size();

    // Determine which dimensions (W, H, C, N) are collapsed by the shrink-axis mask
    const bool is_shrink_w = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, 0);
    const bool is_shrink_h = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, 1);
    const bool is_shrink_c = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, 2);
    const bool is_shrink_n = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, 3);

    // Map each input dimension to the output window dimension that iterates over it;
    // shrunk dimensions reuse index 0 (their stride is zeroed below, so the term vanishes)
    unsigned int index = 0;
    const int idx_w = is_shrink_w ? 0 : index++;
    const int idx_h = is_shrink_h ? 0 : index++;
    const int idx_c = is_shrink_c ? 0 : index++;
    const int idx_n = is_shrink_n ? 0 : index;

    // Zero the stride of shrunk dimensions so they stay fixed at their start coordinate
    BiStrides shrinked_strides;
    shrinked_strides.set(0, is_shrink_w ? 0 : strides[0]);
    shrinked_strides.set(1, is_shrink_h ? 0 : strides[1]);
    shrinked_strides.set(2, is_shrink_c ? 0 : strides[2]);
    shrinked_strides.set(3, is_shrink_n ? 0 : strides[3]);

    // Translate each output element's window coordinates into input coordinates and copy one element
    execute_window_loop(window, [&](const Coordinates & id)
    {
        const int w_coord = starts[0] + (id[idx_w] * shrinked_strides[0]);
        const int h_coord = starts[1] + (id[idx_h] * shrinked_strides[1]);
        const int c_coord = starts[2] + (id[idx_c] * shrinked_strides[2]);
        const int n_coord = starts[3] + (id[idx_n] * shrinked_strides[3]);

        Coordinates in_coords(w_coord, h_coord, c_coord, n_coord);
        std::copy_n(input->ptr_to_element(in_coords), width_size, output_it.ptr());
    },
    output_it);
}
} // namespace

NEStridedSliceKernel::NEStridedSliceKernel()
    : _input(nullptr), _output(nullptr), _starts_abs(), _final_strides(), _shrink_mask()
{
}

void NEStridedSliceKernel::configure(const ITensor *input, ITensor *output,
                                     const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                                     int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));

    _input       = input;
    _output      = output;
    _shrink_mask = shrink_axis_mask;

    const TensorShape &input_shape = input->info()->tensor_shape();

    Coordinates ends_abs;
    std::tie(_starts_abs, ends_abs, _final_strides) = arm_compute::helpers::tensor_transform::calculate_strided_slice_coords(
                                                          input_shape,
                                                          starts, ends, strides,
                                                          begin_mask, end_mask, shrink_axis_mask);

    // Configure kernel window
    auto win_config = validate_and_configure_window(input->info(), output->info(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    INEKernel::configure(win_config.second);
}

Status NEStridedSliceKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
                                      const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                                      int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(),
                                                              starts, ends, strides, begin_mask, end_mask, shrink_axis_mask)
                                .first);

    return Status{};
}

void NEStridedSliceKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    // Dispatch kernel
    strided_slice_generic(_input, _output, _starts_abs, _final_strides, _shrink_mask, window);
}
} // namespace arm_compute
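
// ---------------------------------------------------------------------------
// Illustrative usage sketch (kept as a comment, not compiled): a minimal example of driving the
// kernel directly, assuming the runtime Tensor and NEScheduler APIs from arm_compute/runtime
// (Tensor.h, NEON/NEScheduler.h). In practice the NEStridedSlice runtime function is the usual
// entry point; the shapes and slice parameters below are arbitrary illustration values.
//
//   Tensor src;
//   Tensor dst;
//   src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
//
//   NEStridedSliceKernel kernel;
//   // Slice the first half of the width, every other row, all channels
//   kernel.configure(&src, &dst,
//                    Coordinates(0, 0, 0), Coordinates(4, 8, 3), BiStrides(1, 2, 1),
//                    0 /* begin_mask */, 0 /* end_mask */, 0 /* shrink_axis_mask */);
//
//   src.allocator()->allocate();
//   dst.allocator()->allocate(); // dst info was auto-initialized by configure()
//   // ... fill src ...
//   NEScheduler::get().schedule(&kernel, Window::DimY);
// ---------------------------------------------------------------------------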