Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2016, 2017 ARM Limited. |
| 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
| 24 | #ifndef __ARM_COMPUTE_HELPERS_H__ |
| 25 | #define __ARM_COMPUTE_HELPERS_H__ |
| 26 | |
| 27 | #include "arm_compute/core/CL/CLTypes.h" |
| 28 | #include "arm_compute/core/Coordinates.h" |
| 29 | #include "arm_compute/core/IAccessWindow.h" |
| 30 | #include "arm_compute/core/Steps.h" |
| 31 | #include "arm_compute/core/Strides.h" |
| 32 | #include "arm_compute/core/TensorShape.h" |
| 33 | #include "arm_compute/core/Types.h" |
| 34 | #include "arm_compute/core/Window.h" |
| 35 | #include <array> |
| 36 | #include <cstddef> |
| 37 | #include <cstdint> |
| 38 | #include <memory> |
| 39 | #include <tuple> |
| 40 | #include <type_traits> |
| 41 | #include <utility> |
| 42 | |
| 43 | namespace arm_compute |
| 44 | { |
| 45 | class IKernel; |
| 46 | class ITensor; |
| 47 | class ITensorInfo; |
| 48 | |
/** Opt-in trait that enables the bitwise operators (e.g. operator&) for a type.
 *
 * By default no type opts in. To enable the operators for an enum E, specialize
 * enable_bitwise_ops<E> so that its value member is true.
 */
template <typename T>
struct enable_bitwise_ops : public std::integral_constant<bool, false>
{
};
| 54 | |
| 55 | template <typename T> |
| 56 | typename std::enable_if<enable_bitwise_ops<T>::value, T>::type operator&(T lhs, T rhs) |
| 57 | { |
| 58 | using underlying_type = typename std::underlying_type<T>::type; |
| 59 | return static_cast<T>(static_cast<underlying_type>(lhs) & static_cast<underlying_type>(rhs)); |
| 60 | } |
| 61 | |
namespace traits
{
/** Meta-function that reports whether a type T appears in the type list of a std::tuple */
template <typename T, typename Tuple>
struct is_contained;

/** Empty tuple: T is never contained. */
template <typename T>
struct is_contained<T, std::tuple<>> : std::false_type
{
};

/** Non-empty tuple: true if T matches the head, otherwise recurse on the tail. */
template <typename T, typename Head, typename... Tail>
struct is_contained<T, std::tuple<Head, Tail...>>
    : std::conditional<std::is_same<T, Head>::value, std::true_type, is_contained<T, std::tuple<Tail...>>>::type
{
};
} // namespace traits
| 83 | |
| 84 | /** Computes bilinear interpolation using the pointer to the top-left pixel and the pixel's distance between |
| 85 | * the real coordinates and the smallest following integer coordinates. |
| 86 | * |
| 87 | * @param[in] pixel_ptr Pointer to the top-left pixel value. Format: Single channel U8 |
| 88 | * @param[in] stride Stride to access the bottom-left and bottom-right pixel values |
| 89 | * @param[in] dx Pixel's distance between the X real coordinate and the smallest X following integer |
| 90 | * @param[in] dy Pixel's distance between the Y real coordinate and the smallest Y following integer |
| 91 | * |
| 92 | * @note dx and dy must be in the range [0, 1.0] |
| 93 | * |
| 94 | * @return The bilinear interpolated pixel value |
| 95 | */ |
| 96 | inline uint8_t delta_bilinear_c1u8(const uint8_t *pixel_ptr, size_t stride, float dx, float dy); |
| 97 | |
| 98 | /** Return the pixel at (x,y) using bilinear interpolation. The image must be single channel U8 |
| 99 | * |
| 100 | * @warning Only works if the iterator was created with an IImage |
| 101 | * |
| 102 | * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image. |
 * @param[in] stride          Stride in bytes of the image.
| 104 | * @param[in] x X position of the wanted pixel |
| 105 | * @param[in] y Y position of the wanted pixel |
| 106 | * |
| 107 | * @return The pixel at (x, y) using bilinear interpolation. |
| 108 | */ |
| 109 | inline uint8_t pixel_bilinear_c1u8(const uint8_t *first_pixel_ptr, size_t stride, float x, float y); |
| 110 | |
| 111 | /** Return the pixel at (x,y) using bilinear interpolation by clamping when out of borders. The image must be single channel U8 |
| 112 | * |
| 113 | * @warning Only works if the iterator was created with an IImage |
| 114 | * |
| 115 | * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image. |
| 116 | * @param[in] stride Stride in bytes of the image |
| 117 | * @param[in] width Width of the image |
| 118 | * @param[in] height Height of the image |
| 119 | * @param[in] x X position of the wanted pixel |
| 120 | * @param[in] y Y position of the wanted pixel |
| 121 | * |
| 122 | * @return The pixel at (x, y) using bilinear interpolation. |
| 123 | */ |
| 124 | inline uint8_t pixel_bilinear_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float x, float y); |
| 125 | |
| 126 | /** Return the pixel at (x,y) using area interpolation by clamping when out of borders. The image must be single channel U8 |
| 127 | * |
 * @note The interpolation area depends on the width and height ratio of the input and output images
| 129 | * @note Currently average of the contributing pixels is calculated |
| 130 | * |
| 131 | * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image. |
| 132 | * @param[in] stride Stride in bytes of the image |
| 133 | * @param[in] width Width of the image |
| 134 | * @param[in] height Height of the image |
| 135 | * @param[in] wr Width ratio among the input image width and output image width. |
| 136 | * @param[in] hr Height ratio among the input image height and output image height. |
| 137 | * @param[in] x X position of the wanted pixel |
| 138 | * @param[in] y Y position of the wanted pixel |
| 139 | * |
| 140 | * @return The pixel at (x, y) using area interpolation. |
| 141 | */ |
| 142 | inline uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y); |
| 143 | |
/** Restrict a value to the inclusive range [lower, upper].
 *
 * @param[in] n     Value to clamp.
 * @param[in] lower Lower threshold.
 * @param[in] upper Upper threshold.
 *
 * @return Clamped value.
 */
template <typename T>
inline T clamp(const T &n, const T &lower, const T &upper)
{
    // Cap from above first, then from below, matching max(lower, min(n, upper))
    const T &capped_above = std::min(n, upper);
    return std::max(lower, capped_above);
}
| 157 | |
/** End of recursion for for_each: no arguments remain, so there is nothing to do. */
template <typename F>
inline void for_each(F &&)
{
}

/** Invoke a callable once for every remaining argument, in order.
 *
 * @param[in] fn    Callable invoked once per argument.
 * @param[in] first Argument handled by this step.
 * @param[in] rest  Arguments handled by the recursive steps.
 */
template <typename F, typename T, typename... Ts>
inline void for_each(F &&fn, T &&first, Ts &&... rest)
{
    fn(first);
    // Peel off one argument per recursion level until the base case is reached
    for_each(fn, rest...);
}
| 176 | |
/** Base case of foldl: a single value remains, so the fold is complete and the
 * function is not applied.
 *
 * @return value.
 */
template <typename F, typename T>
inline T foldl(F &&, const T &value)
{
    return value;
}
| 186 | |
/** Fold left over exactly two values: applies the function once.
 * (Not a base case — the recursion terminates in the single-value overload above.)
 *
 * @return Function evaluation for value1 and value2
 */
template <typename F, typename T, typename U>
inline auto foldl(F &&func, T &&value1, U &&value2) -> decltype(func(value1, value2))
{
    return func(value1, value2);
}
| 196 | |
/** Fold left.
 *
 * Combines recursively: foldl(f, initial, v0, v1, ...) == foldl(f, f(initial, v0), v1, ...).
 * Arguments are perfectly forwarded; the result of each application becomes the
 * accumulator of the next step.
 *
 * @param[in] func    Function to be called
 * @param[in] initial Initial value
 * @param[in] value   Argument passed to the function
 * @param[in] values  Remaining arguments
 */
template <typename F, typename I, typename T, typename... Vs>
inline I foldl(F &&func, I &&initial, T &&value, Vs &&... values)
{
    return foldl(std::forward<F>(func), func(std::forward<I>(initial), std::forward<T>(value)), std::forward<Vs>(values)...);
}
| 209 | |
/** Iterator updated by @ref execute_window_loop for each window element */
class Iterator
{
public:
    /** Default constructor to create an empty iterator */
    constexpr Iterator();
    /** Create a container iterator for the metadata and allocation contained in the ITensor
     *
     * @param[in] tensor The tensor to associate to the iterator.
     * @param[in] window The window which will be used to iterate over the tensor.
     */
    Iterator(const ITensor *tensor, const Window &window);

    /** Increment the iterator along the specified dimension of the step value associated to the dimension.
     *
     * @warning It is the caller's responsibility to call increment(dimension+1) when reaching the end of a dimension, the iterator will not check for overflow.
     *
     * @note When incrementing a dimension 'n' the coordinates of all the dimensions in the range (0,n-1) are reset. For example if you iterate over a 2D image, everytime you change row (dimension 1), the iterator for the width (dimension 0) is reset to its start.
     *
     * @param[in] dimension Dimension to increment
     */
    void increment(size_t dimension);

    /** Return the offset in bytes from the first element to the current position of the iterator
     *
     * @return The current position of the iterator in bytes relative to the first element.
     */
    constexpr int offset() const;

    /** Return a pointer to the current pixel.
     *
     * @warning Only works if the iterator was created with an ITensor.
     *
     * @return equivalent to buffer() + offset()
     */
    constexpr uint8_t *ptr() const;

    /** Move the iterator back to the beginning of the specified dimension.
     *
     * @param[in] dimension Dimension to reset
     */
    void reset(size_t dimension);

private:
    uint8_t *_ptr; // Current position in the associated buffer; maintained by increment()/reset() (implementation in Helpers.inl)

    /** Per-dimension iteration state.
     * NOTE(review): member semantics inferred from the names; confirm against Helpers.inl.
     */
    class Dimension
    {
    public:
        constexpr Dimension()
            : _dim_start(0), _stride(0)
        {
        }

        int _dim_start; // Start offset of this dimension — presumably in bytes; verify in Helpers.inl
        int _stride;    // Step applied when this dimension is incremented — presumably in bytes; verify in Helpers.inl
    };

    std::array<Dimension, Coordinates::num_max_dimensions> _dims; // One entry per supported coordinate dimension
};
| 270 | |
/** Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
| 272 | * It passes the x and y positions to the lambda_function for each iteration |
| 273 | * |
| 274 | * @param[in] w Window to iterate through. |
| 275 | * @param[in] lambda_function The function of type void(function)( const Coordinates & id ) to call at each iteration. |
| 276 | * Where id represents the absolute coordinates of the item to process. |
| 277 | * @param[in,out] iterators Tensor iterators which will be updated by this function before calling lambda_function. |
| 278 | */ |
| 279 | template <typename L, typename... Ts> |
| 280 | inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators); |
| 281 | |
| 282 | /** Update window and padding size for each of the access patterns. |
| 283 | * |
| 284 | * First the window size is reduced based on all access patterns that are not |
| 285 | * allowed to modify the padding of the underlying tensor. Then the padding of |
| 286 | * the remaining tensors is increased to match the window. |
| 287 | * |
| 288 | * @param[in] win Window that is used by the kernel. |
| 289 | * @param[in] patterns Access patterns used to calculate the final window and padding. |
| 290 | * |
| 291 | * @return True if the window has been changed. Changes to the padding do not |
| 292 | * influence the returned value. |
| 293 | */ |
| 294 | template <typename... Ts> |
| 295 | bool update_window_and_padding(Window &win, Ts &&... patterns) |
| 296 | { |
| 297 | bool window_changed = false; |
| 298 | |
| 299 | for_each([&](const IAccessWindow & w) |
| 300 | { |
| 301 | window_changed |= w.update_window_if_needed(win); |
| 302 | }, |
| 303 | patterns...); |
| 304 | |
| 305 | bool padding_changed = false; |
| 306 | |
| 307 | for_each([&](const IAccessWindow & w) |
| 308 | { |
| 309 | padding_changed |= w.update_padding_if_needed(win); |
| 310 | }, |
| 311 | patterns...); |
| 312 | |
| 313 | return window_changed; |
| 314 | } |
| 315 | |
| 316 | /** Calculate the maximum window for a given tensor shape and border setting |
| 317 | * |
| 318 | * @param[in] info Tensor info object defining the shape of the object for which the window is created. |
| 319 | * @param[in] steps (Optional) Number of elements processed for each step. |
| 320 | * @param[in] skip_border (Optional) If true exclude the border region from the window. |
| 321 | * @param[in] border_size (Optional) Border size. |
| 322 | * |
| 323 | * @return The maximum window the kernel can be executed on. |
| 324 | */ |
| 325 | Window calculate_max_window(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize()); |
| 326 | |
| 327 | /** Calculate the maximum window used by a horizontal kernel for a given tensor shape and border setting |
| 328 | * |
| 329 | * @param[in] info Tensor info object defining the shape of the object for which the window is created. |
| 330 | * @param[in] steps (Optional) Number of elements processed for each step. |
| 331 | * @param[in] skip_border (Optional) If true exclude the border region from the window. |
| 332 | * @param[in] border_size (Optional) Border size. The border region will be excluded from the window. |
| 333 | * |
| 334 | * @return The maximum window the kernel can be executed on. |
| 335 | */ |
| 336 | Window calculate_max_window_horizontal(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize()); |
| 337 | |
| 338 | /** Calculate the maximum window for a given tensor shape and border setting. The window will also includes the border. |
| 339 | * |
| 340 | * @param[in] info Tensor info object defining the shape of the object for which the window is created. |
| 341 | * @param[in] steps (Optional) Number of elements processed for each step. |
| 342 | * @param[in] border_size (Optional) Border size. The border region will be included in the window. |
| 343 | * |
| 344 | * @return The maximum window the kernel can be executed on. |
| 345 | */ |
| 346 | Window calculate_max_enlarged_window(const ITensorInfo &info, const Steps &steps = Steps(), BorderSize border_size = BorderSize()); |
| 347 | |
| 348 | /** Intersect multiple valid regions. |
| 349 | * |
| 350 | * @param[in] regions Valid regions. |
| 351 | * |
| 352 | * @return Intersection of all regions. |
| 353 | */ |
| 354 | template <typename... Ts> |
| 355 | ValidRegion intersect_valid_regions(Ts &&... regions) |
| 356 | { |
| 357 | auto intersect = [](const ValidRegion & r1, const ValidRegion & r2) -> ValidRegion |
| 358 | { |
| 359 | ValidRegion region; |
| 360 | |
| 361 | for(size_t d = 0; d < std::min(r1.anchor.num_dimensions(), r2.anchor.num_dimensions()); ++d) |
| 362 | { |
| 363 | region.anchor.set(d, std::max(r1.anchor[d], r2.anchor[d])); |
| 364 | } |
| 365 | |
| 366 | for(size_t d = 0; d < std::min(r1.shape.num_dimensions(), r2.shape.num_dimensions()); ++d) |
| 367 | { |
| 368 | region.shape.set(d, std::min(r1.shape[d], r2.shape[d])); |
| 369 | } |
| 370 | |
| 371 | return region; |
| 372 | }; |
| 373 | |
| 374 | return foldl(intersect, std::forward<Ts>(regions)...); |
| 375 | } |
| 376 | |
| 377 | /** Create a strides object based on the provided strides and the tensor dimensions. |
| 378 | * |
| 379 | * @param[in] info Tensor info object providing the shape of the tensor for unspecified strides. |
| 380 | * @param[in] stride_x Stride to be used in X dimension (in bytes). |
| 381 | * @param[in] fixed_strides Strides to be used in higher dimensions starting at Y (in bytes). |
| 382 | * |
| 383 | * @return Strides object based on the specified strides. Missing strides are |
| 384 | * calculated based on the tensor shape and the strides of lower dimensions. |
| 385 | */ |
| 386 | template <typename T, typename... Ts> |
| 387 | inline Strides compute_strides(const ITensorInfo &info, T stride_x, Ts &&... fixed_strides) |
| 388 | { |
| 389 | const TensorShape &shape = info.tensor_shape(); |
| 390 | |
| 391 | // Create strides object |
| 392 | Strides strides(stride_x, fixed_strides...); |
| 393 | |
| 394 | for(size_t i = 1 + sizeof...(Ts); i < info.num_dimensions(); ++i) |
| 395 | { |
| 396 | strides.set(i, shape[i - 1] * strides[i - 1]); |
| 397 | } |
| 398 | |
| 399 | return strides; |
| 400 | } |
| 401 | |
/** Create a strides object based on the tensor dimensions.
 *
 * @param[in] info Tensor info object used to compute the strides.
 *
 * @return Strides object based on element size and tensor shape.
 */
template <typename... Ts>
inline Strides compute_strides(const ITensorInfo &info)
{
    // For a dense tensor the X stride is the element size; all higher strides
    // are derived from the shape by the overload above.
    return compute_strides(info, info.element_size());
}
| 413 | |
/** Auto initialize the tensor info (shape, number of channels, data type and fixed point position) if the current assignment is empty.
| 415 | * |
| 416 | * @param[in,out] info Tensor info used to check and assign. |
| 417 | * @param[in] shape New shape. |
| 418 | * @param[in] num_channels New number of channels. |
| 419 | * @param[in] data_type New data type |
| 420 | * @param[in] fixed_point_position New fixed point position |
| 421 | * |
| 422 | * @return True if the tensor info has been initialized |
| 423 | */ |
| 424 | bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, int fixed_point_position); |
| 425 | |
/** Set the shape to the specified value if the current assignment is empty.
| 427 | * |
| 428 | * @param[in,out] info Tensor info used to check and assign. |
| 429 | * @param[in] shape New shape. |
| 430 | * |
| 431 | * @return True if the shape has been changed. |
| 432 | */ |
| 433 | bool set_shape_if_empty(ITensorInfo &info, const TensorShape &shape); |
| 434 | |
/** Set the format, data type and number of channels to the specified value if
| 436 | * the current data type is unknown. |
| 437 | * |
| 438 | * @param[in,out] info Tensor info used to check and assign. |
| 439 | * @param[in] format New format. |
| 440 | * |
| 441 | * @return True if the format has been changed. |
| 442 | */ |
| 443 | bool set_format_if_unknown(ITensorInfo &info, Format format); |
| 444 | |
/** Set the data type and number of channels to the specified value if
| 446 | * the current data type is unknown. |
| 447 | * |
| 448 | * @param[in,out] info Tensor info used to check and assign. |
| 449 | * @param[in] data_type New data type. |
| 450 | * |
| 451 | * @return True if the data type has been changed. |
| 452 | */ |
| 453 | bool set_data_type_if_unknown(ITensorInfo &info, DataType data_type); |
| 454 | |
/** Set the fixed point position to the specified value if
| 456 | * the current fixed point position is 0 and the data type is QS8 or QS16 |
| 457 | * |
| 458 | * @param[in,out] info Tensor info used to check and assign. |
| 459 | * @param[in] fixed_point_position New fixed point position |
| 460 | * |
| 461 | * @return True if the fixed point position has been changed. |
| 462 | */ |
| 463 | bool set_fixed_point_position_if_zero(ITensorInfo &info, int fixed_point_position); |
| 464 | } // namespace arm_compute |
| 465 | |
| 466 | #include "arm_compute/core/Helpers.inl" |
| 467 | #endif /*__ARM_COMPUTE_HELPERS_H__ */ |