blob: 531b06de64603cd0670dc3f94dec28eaae3d294b [file] [log] [blame]
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
25
26#include "arm_compute/runtime/NEON/NEScheduler.h"
27
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +000028#include "arm_compute/core/Types.h"
29#include "arm_compute/core/utils/misc/ShapeCalculator.h"
Michalis Spyrouebcebf12020-10-21 00:04:14 +010030#include "src/core/NEON/kernels/NEPadLayerKernel.h"
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010031#include "src/core/helpers/AutoConfiguration.h"
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +000032
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +000033namespace arm_compute
34{
35namespace
36{
Usama Arif8cf8c112019-03-14 15:36:54 +000037uint32_t last_padding_dimension(const PaddingList &padding)
38{
39 int last_padding_dim = padding.size() - 1;
40 for(; last_padding_dim >= 0; --last_padding_dim)
41 {
42 if(padding[last_padding_dim].first > 0 || padding[last_padding_dim].second > 0)
43 {
44 break;
45 }
46 }
47 return static_cast<uint32_t>(last_padding_dim);
48}
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +000049} // namespace
50
// Out-of-line defaulted destructor: required here (not in the header) because the
// header only forward-declares NEPadLayerKernel held in a unique_ptr.
NEPadLayer::~NEPadLayer() = default;
52
// Default-construct all members; _num_dimensions starts at 0 so run() takes the
// plain-copy path until configure() has been called.
NEPadLayer::NEPadLayer()
    : _copy_function(), _pad_kernel(), _mode(), _padding(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
{
}
57
Usama Arif8cf8c112019-03-14 15:36:54 +000058void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +000059{
Georgios Pinitas40f51a62020-11-21 03:04:18 +000060 _pad_kernel = std::make_unique<NEPadLayerKernel>();
Michalis Spyrouebcebf12020-10-21 00:04:14 +010061 _pad_kernel->configure(input, output, padding, constant_value, PaddingMode::CONSTANT);
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +000062}
63
/** Set up REFLECT / SYMMETRIC padding as a cascade of strided-slice and concatenate functions.
 *
 * @param[in]  input  Source tensor.
 * @param[out] output Destination tensor; written by the final concatenation.
 */
void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output)
{
    // Reflecting can be performed by effectively unfolding the input as follows:
    // For each dimension starting at DimX:
    //      For before and after:
    //          Use strided slice to extract and reverse the part of the
    //          input / previously produced tensor required for the padding.
    //      Concatenate the before and after padding with the input / previously
    //      produced tensor along the current dimension.

    // Two strided slice functions will be required for each dimension padded as well as a
    // concatenate function and the tensors to hold the temporary results.
    _slice_functions.resize(2 * _num_dimensions);
    _slice_results.resize(2 * _num_dimensions);
    _concat_functions.resize(_num_dimensions);
    // Only intermediate results need storage; the last concat writes directly to output.
    _concat_results.resize(_num_dimensions - 1);

    Coordinates starts_before{};
    Coordinates ends_before{};
    Coordinates starts_after{};
    Coordinates ends_after{};
    Coordinates strides{};
    // 'prev' is the tensor produced so far: the input for i == 0, then each concat result.
    ITensor *prev = input;
    for(uint32_t i = 0; i < _num_dimensions; ++i)
    {
        // Values in strides from the previous dimensions need to be set to 1 to avoid reversing again.
        if(i > 0)
        {
            strides.set(i - 1, 1);
        }

        if(_padding[i].first > 0 || _padding[i].second > 0)
        {
            // Set the starts, ends, and strides values for the current dimension.
            // Due to the bit masks passed to strided slice, the values below the current dimension in
            // starts and ends will be ignored so do not need to be modified.
            if(_mode == PaddingMode::REFLECT)
            {
                // REFLECT excludes the border element, hence the extra -1 relative to SYMMETRIC.
                starts_before.set(i, _padding[i].first);
                ends_before.set(i, 0);
                starts_after.set(i, input->info()->dimension(i) - 2);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 2);
                strides.set(i, -1);
            }
            else
            {
                // SYMMETRIC includes the border element in the mirrored region.
                starts_before.set(i, _padding[i].first - 1);
                ends_before.set(i, -1);
                starts_after.set(i, input->info()->dimension(i) - 1);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 1);
                strides.set(i, -1);
            }

            // Strided slice wraps negative indexes around to the end of the range,
            // instead this should indicate use of the full range and so the bit mask will be modified.
            const int32_t begin_mask_before = starts_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_before   = ends_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t begin_mask_after  = starts_after[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_after    = ends_after[i] < 0 ? ~0 : ~(1u << i);

            // Reflect the input values for the padding before and after the input.
            std::vector<const ITensor *> concat_vector;
            if(_padding[i].first > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
                    concat_vector.emplace_back(&_slice_results[2 * i]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            concat_vector.push_back(prev);
            if(_padding[i].second > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
                    concat_vector.emplace_back(&_slice_results[2 * i + 1]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            // Concatenate the padding before and after with the input.
            ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
            _concat_functions[i].configure(concat_vector, out, i);
            if(i != _num_dimensions - 1)
            {
                _concat_results[i].allocator()->allocate();
            }
            prev = out;
        }
        // Allocated unconditionally; for unpadded dimensions the slice results were never
        // configured, so this presumably allocates nothing — NOTE(review): confirm that an
        // unconfigured Tensor's allocator()->allocate() is a no-op.
        _slice_results[2 * i].allocator()->allocate();
        _slice_results[2 * i + 1].allocator()->allocate();
    }
}
166
167void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
168{
169 ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), padding, constant_value, mode));
170
171 _padding = padding;
172 _mode = mode;
173
174 const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), _padding);
175
176 auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape));
177
178 // Find the last dimension requiring padding so that it is known when to write to output and whether any padding is applied.
179 _num_dimensions = last_padding_dimension(padding) + 1;
180 if(_num_dimensions > 0)
181 {
182 switch(_mode)
183 {
184 case PaddingMode::CONSTANT:
185 {
186 configure_constant_mode(input, output, padding, constant_value);
187 break;
188 }
189 case PaddingMode::REFLECT:
190 case PaddingMode::SYMMETRIC:
191 {
192 configure_reflect_symmetric_mode(input, output);
193 break;
194 }
195 default:
196 ARM_COMPUTE_ERROR("Padding mode not supported.");
197 }
198 }
199 else
200 {
201 // Copy the input to the whole output if no padding is applied
Georgios Pinitas0f7ef8a2021-01-10 04:23:52 +0000202 _copy_function.configure(input, output);
Usama Arif8cf8c112019-03-14 15:36:54 +0000203 }
204}
205
206Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +0000207{
208 ARM_COMPUTE_UNUSED(constant_value);
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +0000209
Usama Arif8cf8c112019-03-14 15:36:54 +0000210 const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +0000211
Usama Arif8cf8c112019-03-14 15:36:54 +0000212 if(output->total_size() > 0)
213 {
214 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape);
215 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
216 }
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +0000217
Usama Arif8cf8c112019-03-14 15:36:54 +0000218 switch(mode)
219 {
220 case PaddingMode::CONSTANT:
221 {
Manuel Bottini9032ee32019-08-07 17:04:11 +0100222 return NEPadLayerKernel::validate(input, output, padding, constant_value, mode);
Usama Arif8cf8c112019-03-14 15:36:54 +0000223 }
224 case PaddingMode::REFLECT:
225 case PaddingMode::SYMMETRIC:
226 {
227 for(uint32_t i = 0; i < padding.size(); ++i)
228 {
229 if(mode == PaddingMode::REFLECT)
230 {
231 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first >= input->dimension(i));
232 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second >= input->dimension(i));
233 }
234 else
235 {
236 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first > input->dimension(i));
237 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second > input->dimension(i));
238 }
239 }
240 break;
241 }
242 default:
243 {
244 ARM_COMPUTE_ERROR("Invalid mode");
245 }
246 }
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +0000247 return Status{};
248}
249
250void NEPadLayer::run()
251{
Usama Arif8cf8c112019-03-14 15:36:54 +0000252 if(_num_dimensions > 0)
253 {
254 switch(_mode)
255 {
256 case PaddingMode::CONSTANT:
257 {
Michalis Spyrouebcebf12020-10-21 00:04:14 +0100258 NEScheduler::get().schedule(_pad_kernel.get(), Window::DimZ);
Usama Arif8cf8c112019-03-14 15:36:54 +0000259 break;
260 }
261 case PaddingMode::REFLECT:
262 case PaddingMode::SYMMETRIC:
263 {
264 for(uint32_t i = 0; i < _num_dimensions; ++i)
265 {
266 if(_padding[i].first > 0 || _padding[i].second > 0)
267 {
268 if(_padding[i].first > 0 && _slice_results[2 * i].info()->total_size() > 0)
269 {
270 _slice_functions[2 * i].run();
271 }
272 if(_padding[i].second > 0 && _slice_results[2 * i + 1].info()->total_size() > 0)
273 {
274 _slice_functions[2 * i + 1].run();
275 }
276 _concat_functions[i].run();
277 }
278 }
279 break;
280 }
281 default:
282 ARM_COMPUTE_ERROR("Padding mode not supported.");
283 }
284 }
285 else
286 {
Georgios Pinitas0f7ef8a2021-01-10 04:23:52 +0000287 _copy_function.run();
Usama Arif8cf8c112019-03-14 15:36:54 +0000288 }
Georgios Pinitasdea2d2d2018-12-19 16:23:17 +0000289}
290} // namespace arm_compute