/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"

#include "arm_compute/runtime/NEON/NEScheduler.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

namespace arm_compute
{
namespace
{
uint32_t last_padding_dimension(const PaddingList &padding)
{
    int last_padding_dim = padding.size() - 1;
    for(; last_padding_dim >= 0; --last_padding_dim)
    {
        if(padding[last_padding_dim].first > 0 || padding[last_padding_dim].second > 0)
        {
            break;
        }
    }
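    // When no dimension is padded, the loop falls through with -1; the cast below
    // wraps it to UINT32_MAX, so the "+ 1" in configure() wraps back to 0 and the
    // plain-copy path is taken.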
    return static_cast<uint32_t>(last_padding_dim);
}
} // namespace

NEPadLayer::NEPadLayer()
    : _copy_kernel(), _pad_kernel(), _mode(), _padding(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
{
}

void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
{
    _pad_kernel.configure(input, output, padding, constant_value, PaddingMode::CONSTANT);
}

void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output)
{
    // Reflecting can be performed by effectively unfolding the input as follows:
    // For each dimension starting at DimX:
    //     For before and after:
    //         Use strided slice to extract and reverse the part of the
    //         input / previously produced tensor required for the padding.
    //     Concatenate the before and after padding with the input / previously
    //     produced tensor along the current dimension.

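    // A worked 1-D example (values are illustrative): for input = [1, 2, 3, 4] and padding = (2, 1),
    //     REFLECT   yields [3, 2, 1, 2, 3, 4, 3] (the border element is not repeated),
    //     SYMMETRIC yields [2, 1, 1, 2, 3, 4, 4] (the border element is repeated).
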
    // Two strided slice functions will be required for each dimension padded as well as a
    // concatenate function and the tensors to hold the temporary results.
    _slice_functions.resize(2 * _num_dimensions);
    _slice_results.resize(2 * _num_dimensions);
    _concat_functions.resize(_num_dimensions);
    _concat_results.resize(_num_dimensions - 1);
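    // The final concatenation writes directly to the output tensor, which is why only
    // _num_dimensions - 1 intermediate concatenation results are needed.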

    Coordinates starts_before{};
    Coordinates ends_before{};
    Coordinates starts_after{};
    Coordinates ends_after{};
    Coordinates strides{};
    ITensor    *prev = input;
    for(uint32_t i = 0; i < _num_dimensions; ++i)
    {
        // Values in strides from the previous dimensions need to be set to 1 to avoid reversing again.
        if(i > 0)
        {
            strides.set(i - 1, 1);
        }

        if(_padding[i].first > 0 || _padding[i].second > 0)
        {
            // Set the starts, ends, and strides values for the current dimension.
            // Due to the bit masks passed to strided slice, the values below the current dimension in
            // starts and ends will be ignored so do not need to be modified.
            if(_mode == PaddingMode::REFLECT)
            {
                starts_before.set(i, _padding[i].first);
                ends_before.set(i, 0);
                starts_after.set(i, input->info()->dimension(i) - 2);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 2);
                strides.set(i, -1);
            }
            else
            {
                starts_before.set(i, _padding[i].first - 1);
                ends_before.set(i, -1);
                starts_after.set(i, input->info()->dimension(i) - 1);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 1);
                strides.set(i, -1);
            }
            // Strided slice wraps negative indices around to the end of the range;
            // here a negative value should instead select the full range, so the bit
            // mask is adjusted accordingly.
            const int32_t begin_mask_before = starts_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_before   = ends_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t begin_mask_after  = starts_after[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_after    = ends_after[i] < 0 ? ~0 : ~(1u << i);
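            // A set bit in a mask tells strided slice to ignore the given index for that
            // dimension and use its full range instead. ~(1u << i) therefore pins only
            // dimension i to the explicit index, while ~0 releases dimension i as well
            // whenever the index is negative.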

            // Reflect the input values for the padding before and after the input.
            std::vector<const ITensor *> concat_vector;
            if(_padding[i].first > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
                    concat_vector.emplace_back(&_slice_results[2 * i]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            concat_vector.push_back(prev);
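            // The previous tensor (the original input when i == 0) always sits between
            // the before-padding and after-padding slices in the concatenation.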
            if(_padding[i].second > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
                    concat_vector.emplace_back(&_slice_results[2 * i + 1]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            // Concatenate the padding before and after with the input.
            ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
            _concat_functions[i].configure(concat_vector, out, i);
            if(i != _num_dimensions - 1)
            {
                _concat_results[i].allocator()->allocate();
            }
            prev = out;
        }
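        // Slice results that were never configured above still have empty info, so
        // allocating them here is effectively a no-op.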
        _slice_results[2 * i].allocator()->allocate();
        _slice_results[2 * i + 1].allocator()->allocate();
    }
}

void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), padding, constant_value, mode));

    _padding = padding;
    _mode    = mode;

    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), _padding);

    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape));

    // Find the last dimension requiring padding so that it is known when to write to output and whether any padding is applied.
    _num_dimensions = last_padding_dimension(padding) + 1;
    if(_num_dimensions > 0)
    {
        switch(_mode)
        {
            case PaddingMode::CONSTANT:
            {
                configure_constant_mode(input, output, padding, constant_value);
                break;
            }
            case PaddingMode::REFLECT:
            case PaddingMode::SYMMETRIC:
            {
                configure_reflect_symmetric_mode(input, output);
                break;
            }
            default:
                ARM_COMPUTE_ERROR("Padding mode not supported.");
        }
    }
    else
    {
        // Copy the input to the whole output if no padding is applied
        _copy_kernel.configure(input, output);
    }
}

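// A minimal usage sketch (illustrative only; the tensor names, shape, and padding
// values below are assumptions, not part of this file). It pads a 3x2 F32 tensor by
// one element on each side of the first dimension and two elements after the second,
// producing a 5x4 output:
//
//     Tensor src{}, dst{};
//     src.allocator()->init(TensorInfo(TensorShape(3U, 2U), 1, DataType::F32));
//     NEPadLayer pad;
//     pad.configure(&src, &dst, PaddingList{ { 1, 1 }, { 0, 2 } }, PixelValue(), PaddingMode::CONSTANT);
//     src.allocator()->allocate();
//     dst.allocator()->allocate();
//     // ... fill src ...
//     pad.run();
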
Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    ARM_COMPUTE_UNUSED(constant_value);

    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);

    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    switch(mode)
    {
        case PaddingMode::CONSTANT:
        {
            return NEPadLayerKernel::validate(input, output, padding, constant_value, mode);
        }
        case PaddingMode::REFLECT:
        case PaddingMode::SYMMETRIC:
        {
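            // REFLECT omits the border element, so at most dimension(i) - 1 values can be
            // mirrored on each side; SYMMETRIC repeats the border, allowing up to dimension(i).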
            for(uint32_t i = 0; i < padding.size(); ++i)
            {
                if(mode == PaddingMode::REFLECT)
                {
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first >= input->dimension(i));
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second >= input->dimension(i));
                }
                else
                {
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first > input->dimension(i));
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second > input->dimension(i));
                }
            }
            break;
        }
        default:
        {
            ARM_COMPUTE_ERROR("Invalid mode");
        }
    }
    return Status{};
}

void NEPadLayer::run()
{
    if(_num_dimensions > 0)
    {
        switch(_mode)
        {
            case PaddingMode::CONSTANT:
            {
                NEScheduler::get().schedule(&_pad_kernel, Window::DimZ);
                break;
            }
            case PaddingMode::REFLECT:
            case PaddingMode::SYMMETRIC:
            {
                for(uint32_t i = 0; i < _num_dimensions; ++i)
                {
                    if(_padding[i].first > 0 || _padding[i].second > 0)
                    {
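                        // A slice result may be empty when configure() skipped the slice
                        // because it would have been a plain copy of the previous tensor.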
                        if(_padding[i].first > 0 && _slice_results[2 * i].info()->total_size() > 0)
                        {
                            _slice_functions[2 * i].run();
                        }
                        if(_padding[i].second > 0 && _slice_results[2 * i + 1].info()->total_size() > 0)
                        {
                            _slice_functions[2 * i + 1].run();
                        }
                        _concat_functions[i].run();
                    }
                }
                break;
            }
            default:
                ARM_COMPUTE_ERROR("Padding mode not supported.");
        }
    }
    else
    {
        NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
    }
}
} // namespace arm_compute