Georgios Pinitas | dea2d2d | 2018-12-19 16:23:17 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2018-2019 ARM Limited. |
| 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
| 24 | #include "arm_compute/runtime/NEON/functions/NEPadLayer.h" |
| 25 | |
| 26 | #include "arm_compute/runtime/NEON/NEScheduler.h" |
| 27 | |
Georgios Pinitas | dea2d2d | 2018-12-19 16:23:17 +0000 | [diff] [blame] | 28 | #include "arm_compute/core/Types.h" |
| 29 | #include "arm_compute/core/utils/misc/ShapeCalculator.h" |
| 30 | |
| 31 | #include "support/ToolchainSupport.h" |
| 32 | |
| 33 | namespace arm_compute |
| 34 | { |
| 35 | namespace |
| 36 | { |
| 37 | TensorInfo get_expected_output_tensorinfo(const ITensorInfo &input, const PaddingList &paddings) |
| 38 | { |
| 39 | const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input.tensor_shape(), paddings); |
| 40 | const TensorInfo expected_output_info = input.clone()->set_tensor_shape(expected_output_shape); |
| 41 | return expected_output_info; |
| 42 | } |
| 43 | |
| 44 | Status validate_arguments(const ITensorInfo &input, ITensorInfo &output, const PaddingList &paddings) |
| 45 | { |
| 46 | const TensorInfo expected_output_info = get_expected_output_tensorinfo(input, paddings); |
| 47 | auto_init_if_empty(output, expected_output_info); |
| 48 | ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&output, &expected_output_info); |
| 49 | |
| 50 | return Status{}; |
| 51 | } |
| 52 | |
| 53 | Coordinates get_subtensor_coords(const PaddingList &paddings) |
| 54 | { |
| 55 | Coordinates coords; |
| 56 | for(unsigned int i = 0; i < paddings.size(); ++i) |
| 57 | { |
| 58 | coords.set(i, paddings[i].first); |
| 59 | } |
| 60 | |
| 61 | return coords; |
| 62 | } |
Usama Arif | 8cf8c11 | 2019-03-14 15:36:54 +0000 | [diff] [blame] | 63 | |
| 64 | uint32_t last_padding_dimension(const PaddingList &padding) |
| 65 | { |
| 66 | int last_padding_dim = padding.size() - 1; |
| 67 | for(; last_padding_dim >= 0; --last_padding_dim) |
| 68 | { |
| 69 | if(padding[last_padding_dim].first > 0 || padding[last_padding_dim].second > 0) |
| 70 | { |
| 71 | break; |
| 72 | } |
| 73 | } |
| 74 | return static_cast<uint32_t>(last_padding_dim); |
| 75 | } |
Georgios Pinitas | dea2d2d | 2018-12-19 16:23:17 +0000 | [diff] [blame] | 76 | } // namespace |
| 77 | |
// Default constructor: all members are default-initialised. _num_dimensions starts at 0;
// the real set-up of kernels and sub-functions happens in configure().
NEPadLayer::NEPadLayer()
    : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results(), _output_subtensor()
{
}
| 82 | |
void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
{
    // Auto-init: initialise the output info from the input plus padding if the caller left it empty.
    auto_init_if_empty(*output->info(), get_expected_output_tensorinfo(*input->info(), padding));

    // Create SubTensor (Can use sub-tensor as the kernels to be executed do not require padding)
    _output_subtensor = SubTensor(output, input->info()->tensor_shape(), get_subtensor_coords(padding), true);

    // Fill the whole output with the constant padding value first...
    _memset_kernel.configure(output, constant_value);

    // ...then overwrite the interior region (the sub-tensor) with the input data.
    _copy_kernel.configure(input, &_output_subtensor);
}
| 97 | |
void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output)
{
    // Reflecting can be performed by effectively unfolding the input as follows:
    // For each dimension starting at DimX:
    //     For before and after:
    //         Use strided slice to extract and reverse the part of the
    //         input / previously produced tensor required for the padding.
    //     Concatenate the before and after padding with the input / previously
    //     produced tensor along the current dimension.

    // Two strided slice functions will be required for each dimension padded as well as a
    // concatenate function and the tensors to hold the temporary results.
    _slice_functions.resize(2 * _num_dimensions);
    _slice_results.resize(2 * _num_dimensions);
    _concat_functions.resize(_num_dimensions);
    _concat_results.resize(_num_dimensions - 1);

    Coordinates starts_before{};
    Coordinates ends_before{};
    Coordinates starts_after{};
    Coordinates ends_after{};
    Coordinates strides{};
    ITensor *prev = input;
    for(uint32_t i = 0; i < _num_dimensions; ++i)
    {
        // Values in strides from the previous dimensions need to be set to 1 to avoid reversing again.
        if(i > 0)
        {
            strides.set(i - 1, 1);
        }

        if(_padding[i].first > 0 || _padding[i].second > 0)
        {
            // Set the starts, ends, and strides values for the current dimension.
            // Due to the bit masks passed to strided slice, the values below the current dimension in
            // starts and ends will be ignored so do not need to be modified.
            if(_mode == PaddingMode::REFLECT)
            {
                // REFLECT excludes the border element itself, hence the extra offset of 1
                // compared to the SYMMETRIC case below.
                starts_before.set(i, _padding[i].first);
                ends_before.set(i, 0);
                starts_after.set(i, input->info()->dimension(i) - 2);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 2);
                strides.set(i, -1);
            }
            else
            {
                // SYMMETRIC includes the border element in the mirrored region.
                starts_before.set(i, _padding[i].first - 1);
                ends_before.set(i, -1);
                starts_after.set(i, input->info()->dimension(i) - 1);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 1);
                strides.set(i, -1);
            }

            // Strided slice wraps negative indexes around to the end of the range,
            // instead this should indicate use of the full range and so the bit mask will be modified.
            const int32_t begin_mask_before = starts_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_before = ends_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t begin_mask_after = starts_after[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_after = ends_after[i] < 0 ? ~0 : ~(1u << i);

            // Reflect the input values for the padding before and after the input.
            std::vector<ITensor *> concat_vector;
            if(_padding[i].first > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
                    concat_vector.emplace_back(&_slice_results[2 * i]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            concat_vector.push_back(prev);
            if(_padding[i].second > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
                    concat_vector.emplace_back(&_slice_results[2 * i + 1]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            // Concatenate the padding before and after with the input.
            // The final iteration writes straight into the function's output tensor;
            // earlier iterations need an intermediate tensor with backing memory.
            ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
            _concat_functions[i].configure(concat_vector, out, i);
            if(i != _num_dimensions - 1)
            {
                _concat_results[i].allocator()->allocate();
            }
            prev = out;
        }
        // Allocate the slice result tensors for this dimension. When the corresponding
        // padding amount was zero the tensor was never configured; run() only executes
        // slices whose result has a non-zero total size.
        _slice_results[2 * i].allocator()->allocate();
        _slice_results[2 * i + 1].allocator()->allocate();
    }
}
| 200 | |
| 201 | void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode) |
| 202 | { |
| 203 | ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), padding, constant_value, mode)); |
| 204 | |
| 205 | _padding = padding; |
| 206 | _mode = mode; |
| 207 | |
| 208 | const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), _padding); |
| 209 | |
| 210 | auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape)); |
| 211 | |
| 212 | // Find the last dimension requiring padding so that it is known when to write to output and whether any padding is applied. |
| 213 | _num_dimensions = last_padding_dimension(padding) + 1; |
| 214 | if(_num_dimensions > 0) |
| 215 | { |
| 216 | switch(_mode) |
| 217 | { |
| 218 | case PaddingMode::CONSTANT: |
| 219 | { |
| 220 | configure_constant_mode(input, output, padding, constant_value); |
| 221 | break; |
| 222 | } |
| 223 | case PaddingMode::REFLECT: |
| 224 | case PaddingMode::SYMMETRIC: |
| 225 | { |
| 226 | configure_reflect_symmetric_mode(input, output); |
| 227 | break; |
| 228 | } |
| 229 | default: |
| 230 | ARM_COMPUTE_ERROR("Padding mode not supported."); |
| 231 | } |
| 232 | } |
| 233 | else |
| 234 | { |
| 235 | // Copy the input to the whole output if no padding is applied |
| 236 | _copy_kernel.configure(input, output); |
| 237 | } |
| 238 | } |
| 239 | |
Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    // The constant fill value has no impact on shape/type validity.
    ARM_COMPUTE_UNUSED(constant_value);

    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);

    // If the output has already been initialised it must agree with the padded shape
    // and the input's data type.
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    switch(mode)
    {
        case PaddingMode::CONSTANT:
        {
            // Validate against a clone so that validate_arguments' auto-initialisation
            // does not mutate the caller's output info.
            auto output_clone = output->clone();
            SubTensorInfo output_subtensor_info(output_clone.get(), input->tensor_shape(), get_subtensor_coords(padding), true);
            ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output_clone, padding));
            ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(input, &output_subtensor_info));
            break;
        }
        case PaddingMode::REFLECT:
        case PaddingMode::SYMMETRIC:
        {
            for(uint32_t i = 0; i < padding.size(); ++i)
            {
                if(mode == PaddingMode::REFLECT)
                {
                    // REFLECT does not repeat the border element, so each padding amount
                    // must be strictly smaller than the corresponding dimension.
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first >= input->dimension(i));
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second >= input->dimension(i));
                }
                else
                {
                    // SYMMETRIC repeats the border element, so padding up to the full
                    // dimension size is allowed.
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first > input->dimension(i));
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second > input->dimension(i));
                }
            }
            break;
        }
        default:
        {
            ARM_COMPUTE_ERROR("Invalid mode");
        }
    }
    return Status{};
}
| 287 | |
| 288 | void NEPadLayer::run() |
| 289 | { |
Usama Arif | 8cf8c11 | 2019-03-14 15:36:54 +0000 | [diff] [blame] | 290 | if(_num_dimensions > 0) |
| 291 | { |
| 292 | switch(_mode) |
| 293 | { |
| 294 | case PaddingMode::CONSTANT: |
| 295 | { |
| 296 | NEScheduler::get().schedule(&_memset_kernel, Window::DimY); |
| 297 | NEScheduler::get().schedule(&_copy_kernel, Window::DimY); |
| 298 | break; |
| 299 | } |
| 300 | case PaddingMode::REFLECT: |
| 301 | case PaddingMode::SYMMETRIC: |
| 302 | { |
| 303 | for(uint32_t i = 0; i < _num_dimensions; ++i) |
| 304 | { |
| 305 | if(_padding[i].first > 0 || _padding[i].second > 0) |
| 306 | { |
| 307 | if(_padding[i].first > 0 && _slice_results[2 * i].info()->total_size() > 0) |
| 308 | { |
| 309 | _slice_functions[2 * i].run(); |
| 310 | } |
| 311 | if(_padding[i].second > 0 && _slice_results[2 * i + 1].info()->total_size() > 0) |
| 312 | { |
| 313 | _slice_functions[2 * i + 1].run(); |
| 314 | } |
| 315 | _concat_functions[i].run(); |
| 316 | } |
| 317 | } |
| 318 | break; |
| 319 | } |
| 320 | default: |
| 321 | ARM_COMPUTE_ERROR("Padding mode not supported."); |
| 322 | } |
| 323 | } |
| 324 | else |
| 325 | { |
| 326 | NEScheduler::get().schedule(&_copy_kernel, Window::DimY); |
| 327 | } |
Georgios Pinitas | dea2d2d | 2018-12-19 16:23:17 +0000 | [diff] [blame] | 328 | } |
| 329 | } // namespace arm_compute |