/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLPadLayer.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "support/ToolchainSupport.h"

31namespace arm_compute
32{
33CLPadLayer::CLPadLayer()
Michalis Spyroubcfd09a2019-05-01 13:03:59 +010034 : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
Giuseppe Rossinid7647d42018-07-17 18:13:13 +010035{
36}
37
Manuel Bottini60f39112019-03-18 15:25:15 +000038void CLPadLayer::configure_constant_mode(ICLTensor *input, ICLTensor *output, const PaddingList &padding, const PixelValue constant_value)
39{
40 // Set the pages of the output to the constant_value.
41 _memset_kernel.configure(output, constant_value);
42
43 // Fill out padding list with zeroes.
44 PaddingList padding_extended = padding;
45 for(size_t i = padding.size(); i < TensorShape::num_max_dimensions; i++)
46 {
47 padding_extended.emplace_back(PaddingInfo{ 0, 0 });
48 }
49
50 // Create a window within the output tensor where the input will be copied.
51 Window copy_window = Window();
52 for(uint32_t i = 0; i < output->info()->num_dimensions(); ++i)
53 {
54 copy_window.set(i, Window::Dimension(padding_extended[i].first, padding_extended[i].first + input->info()->dimension(i), 1));
55 }
56 // Copy the input to the output, leaving the padding filled with the constant_value.
57 _copy_kernel.configure(input, output, PaddingList(), &copy_window);
58}
59
60void CLPadLayer::configure_reflect_symmetric_mode(ICLTensor *input, ICLTensor *output)
61{
62 int64_t last_padding_dimension = _padding.size() - 1;
63 // Reflecting can be performed by effectively unfolding the input as follows:
64 // For each dimension starting at DimX:
65 // Create a before and after slice, which values depend on the selected padding mode
66 // Concatenate the before and after padding with the tensor to be padded
67
68 // Two strided slice functions will be required for each dimension padded as well as a
69 // concatenate function and the tensors to hold the temporary results.
Michalis Spyroubcfd09a2019-05-01 13:03:59 +010070 _slice_functions.resize(2 * _num_dimensions);
71 _slice_results.resize(2 * _num_dimensions);
72 _concat_functions.resize(_num_dimensions);
73 _concat_results.resize(_num_dimensions - 1);
74
75 Coordinates starts_before{};
76 Coordinates ends_before{};
77 Coordinates starts_after{};
78 Coordinates ends_after{};
79 Coordinates strides{};
Manuel Bottini60f39112019-03-18 15:25:15 +000080 ICLTensor *prev = input;
81 for(uint32_t i = 0; i < _num_dimensions; ++i)
82 {
83 // Values in strides from the previous dimensions need to be set to 1 to avoid reversing again.
84 if(i > 0)
85 {
86 strides.set(i - 1, 1);
87 }
88
89 if(_padding[i].first > 0 || _padding[i].second > 0)
90 {
91 // Set the starts, ends, and strides values for the current dimension.
92 // Due to the bit masks passed to strided slice, the values below the current dimension in
93 // starts and ends will be ignored so do not need to be modified.
94 if(_mode == PaddingMode::REFLECT)
95 {
96 starts_before.set(i, _padding[i].first);
97 ends_before.set(i, 0);
98 starts_after.set(i, input->info()->dimension(i) - 2);
99 ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 2);
100 strides.set(i, -1);
101 }
102 else
103 {
104 starts_before.set(i, _padding[i].first - 1);
105 ends_before.set(i, -1);
106 starts_after.set(i, input->info()->dimension(i) - 1);
107 ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 1);
108 strides.set(i, -1);
109 }
110
111 // Strided slice wraps negative indexes around to the end of the range,
112 // instead this should indicate use of the full range and so the bit mask will be modified.
113 const int32_t begin_mask_before = starts_before[i] < 0 ? ~0 : ~(1u << i);
114 const int32_t end_mask_before = ends_before[i] < 0 ? ~0 : ~(1u << i);
115 const int32_t begin_mask_after = starts_after[i] < 0 ? ~0 : ~(1u << i);
116 const int32_t end_mask_after = ends_after[i] < 0 ? ~0 : ~(1u << i);
117
118 // Reflect the input values for the padding before and after the input.
119 std::vector<ICLTensor *> concat_vector;
120 if(_padding[i].first > 0)
121 {
122 if(i < prev->info()->num_dimensions())
123 {
124 _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
125 concat_vector.push_back(&_slice_results[2 * i]);
126 }
127 else
128 {
129 // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
130 concat_vector.push_back(prev);
131 }
132 }
133 concat_vector.push_back(prev);
134 if(_padding[i].second > 0)
135 {
136 if(i < prev->info()->num_dimensions())
137 {
138 _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
139 concat_vector.push_back(&_slice_results[2 * i + 1]);
140 }
141 else
142 {
143 // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
144 concat_vector.push_back(prev);
145 }
146 }
147 // Concatenate the padding before and after with the input.
148 ICLTensor *out = (static_cast<int32_t>(i) == last_padding_dimension) ? output : &_concat_results[i];
Georgios Pinitas9e4824c2019-04-12 13:15:58 +0100149 _concat_functions[i].configure(concat_vector, out, i);
Manuel Bottini60f39112019-03-18 15:25:15 +0000150 prev = out;
151 }
152 }
153 for(uint32_t i = 0; i < _num_dimensions; ++i)
154 {
155 if((static_cast<int32_t>(i) != last_padding_dimension))
156 {
157 _concat_results[i].allocator()->allocate();
158 }
159 _slice_results[2 * i].allocator()->allocate();
160 _slice_results[2 * i + 1].allocator()->allocate();
161 }
162}
163
Usama Arif8cf8c112019-03-14 15:36:54 +0000164void CLPadLayer::configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100165{
Manuel Bottini60f39112019-03-18 15:25:15 +0000166 ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), padding, constant_value, mode));
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100167
Manuel Bottini60f39112019-03-18 15:25:15 +0000168 _padding = padding;
169 _mode = mode;
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100170
Manuel Bottini60f39112019-03-18 15:25:15 +0000171 TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), _padding);
172
173 auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape));
174
175 // Find the last dimension requiring padding so that it is known when to write to output and whether any padding is applied.
176 int64_t last_padding_dimension = _padding.size() - 1;
177 for(; last_padding_dimension >= 0; --last_padding_dimension)
178 {
179 if(_padding[last_padding_dimension].first > 0 || _padding[last_padding_dimension].second > 0)
180 {
181 break;
182 }
183 }
184 _num_dimensions = last_padding_dimension + 1;
185 if(_num_dimensions > 0)
186 {
187 switch(_mode)
188 {
189 case PaddingMode::CONSTANT:
190 {
191 configure_constant_mode(input, output, padding, constant_value);
192 break;
193 }
194 case PaddingMode::REFLECT:
195 case PaddingMode::SYMMETRIC:
196 {
197 configure_reflect_symmetric_mode(input, output);
198 break;
199 }
200 default:
201 ARM_COMPUTE_ERROR("Padding mode not supported.");
202 }
203 }
204 else
205 {
206 // Copy the input to the whole output if no padding is applied
207 _copy_kernel.configure(input, output);
208 }
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100209}
210
Usama Arif8cf8c112019-03-14 15:36:54 +0000211Status CLPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100212{
Manuel Bottini60f39112019-03-18 15:25:15 +0000213 ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > input->num_dimensions());
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100214
Manuel Bottini60f39112019-03-18 15:25:15 +0000215 TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);
216
217 // Use CLCopyKernel and CLMemsetKernel to validate all padding modes as this includes all of the shape and info validation.
218 PaddingList padding_extended = padding;
219 for(size_t i = padding.size(); i < TensorShape::num_max_dimensions; i++)
220 {
221 padding_extended.emplace_back(PaddingInfo{ 0, 0 });
222 }
223
224 Window copy_window = Window();
225 for(uint32_t i = 0; i < padded_shape.num_dimensions(); ++i)
226 {
227 copy_window.set(i, Window::Dimension(padding_extended[i].first, padding_extended[i].first + input->dimension(i), 1));
228 }
229 if(output->total_size() > 0)
230 {
231 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape);
232 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(output, input);
233 ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(input, output, PaddingList(), &copy_window));
234 ARM_COMPUTE_RETURN_ON_ERROR(CLMemsetKernel::validate(output, constant_value));
235 }
236 else
237 {
238 ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(input, &input->clone()->set_tensor_shape(padded_shape), PaddingList(), &copy_window));
239 ARM_COMPUTE_RETURN_ON_ERROR(CLMemsetKernel::validate(&input->clone()->set_tensor_shape(padded_shape), constant_value));
240 }
241
242 switch(mode)
243 {
244 case PaddingMode::CONSTANT:
245 {
246 break;
247 }
248 case PaddingMode::REFLECT:
249 case PaddingMode::SYMMETRIC:
250 {
251 for(uint32_t i = 0; i < padding.size(); ++i)
252 {
253 if(mode == PaddingMode::REFLECT)
254 {
255 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first >= input->dimension(i));
256 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second >= input->dimension(i));
257 }
258 else
259 {
260 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first > input->dimension(i));
261 ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second > input->dimension(i));
262 }
263 }
264 break;
265 }
266 default:
267 {
268 ARM_COMPUTE_ERROR("Invalid mode");
269 }
270 }
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100271 return Status{};
272}
273
274void CLPadLayer::run()
275{
Manuel Bottini60f39112019-03-18 15:25:15 +0000276 if(_num_dimensions > 0)
277 {
278 switch(_mode)
279 {
280 case PaddingMode::CONSTANT:
281 {
282 CLScheduler::get().enqueue(_memset_kernel, false);
283 CLScheduler::get().enqueue(_copy_kernel, true);
284 break;
285 }
286 case PaddingMode::REFLECT:
287 case PaddingMode::SYMMETRIC:
288 {
289 for(uint32_t i = 0; i < _num_dimensions; ++i)
290 {
291 if(_padding[i].first > 0 || _padding[i].second > 0)
292 {
293 if(_padding[i].first > 0 && _slice_results[2 * i].info()->total_size() > 0)
294 {
295 _slice_functions[2 * i].run();
296 }
297 if(_padding[i].second > 0 && _slice_results[2 * i + 1].info()->total_size() > 0)
298 {
299 _slice_functions[2 * i + 1].run();
300 }
301 CLScheduler::get().sync();
302 _concat_functions[i].run();
303 CLScheduler::get().sync();
304 }
305 }
306 break;
307 }
308 default:
309 ARM_COMPUTE_ERROR("Padding mode not supported.");
310 }
311 }
312 else
313 {
314 CLScheduler::get().enqueue(_copy_kernel, true);
315 }
Giuseppe Rossinid7647d42018-07-17 18:13:13 +0100316}
317} // namespace arm_compute