/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Validate.h"

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <utility>

namespace arm_compute
{
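/** Perform bilinear interpolation for a single-channel U8 pixel.
 *
 * @param[in] pixel_ptr Pointer to the top-left pixel of the 2x2 neighbourhood to interpolate.
 * @param[in] stride    Stride (in elements) used to reach the bottom row of the neighbourhood.
 * @param[in] dx        Distance of the sampling point from the left column, expected in [0, 1].
 * @param[in] dy        Distance of the sampling point from the top row, expected in [0, 1].
 *
 * @return The interpolated pixel value.
 */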
inline uint8_t delta_bilinear_c1u8(const uint8_t *pixel_ptr, size_t stride, float dx, float dy)
{
    ARM_COMPUTE_ERROR_ON(pixel_ptr == nullptr);

    const float dx1 = 1.0f - dx;
    const float dy1 = 1.0f - dy;

    const float a00 = *pixel_ptr;
    const float a01 = *(pixel_ptr + 1);
    const float a10 = *(pixel_ptr + stride);
    const float a11 = *(pixel_ptr + stride + 1);

    const float w1 = dx1 * dy1;
    const float w2 = dx * dy1;
    const float w3 = dx1 * dy;
    const float w4 = dx * dy;

    return a00 * w1 + a01 * w2 + a10 * w3 + a11 * w4;
}

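/** Sample a single-channel U8 image with bilinear interpolation at the real coordinates (x, y).
 *
 * @note The coordinates are truncated towards zero, so they are expected to be non-negative;
 *       use pixel_bilinear_c1u8_clamp() when they can fall outside the valid region.
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of the image.
 * @param[in] stride          Stride (in elements) between two consecutive rows.
 * @param[in] x               X coordinate of the sampling point.
 * @param[in] y               Y coordinate of the sampling point.
 *
 * @return The interpolated pixel value.
 */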
inline uint8_t pixel_bilinear_c1u8(const uint8_t *first_pixel_ptr, size_t stride, float x, float y)
{
    ARM_COMPUTE_ERROR_ON(first_pixel_ptr == nullptr);

    const int32_t xi = x;
    const int32_t yi = y;

    const float dx = x - xi;
    const float dy = y - yi;

    return delta_bilinear_c1u8(first_pixel_ptr + xi + yi * stride, stride, dx, dy);
}

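/** Sample a single-channel U8 image with bilinear interpolation, clamping the coordinates to
 * [-1, width] and [-1, height] so that sampling near or beyond the edges stays within the image
 * and its border pixels.
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of the image.
 * @param[in] stride          Stride (in elements) between two consecutive rows.
 * @param[in] width           Width of the image.
 * @param[in] height          Height of the image.
 * @param[in] x               X coordinate of the sampling point.
 * @param[in] y               Y coordinate of the sampling point.
 *
 * @return The interpolated pixel value.
 */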
inline uint8_t pixel_bilinear_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float x, float y)
{
    ARM_COMPUTE_ERROR_ON(first_pixel_ptr == nullptr);

    x = std::max(-1.f, std::min(x, static_cast<float>(width)));
    y = std::max(-1.f, std::min(y, static_cast<float>(height)));

    const float xi = std::floor(x);
    const float yi = std::floor(y);

    const float dx = x - xi;
    const float dy = y - yi;

    return delta_bilinear_c1u8(first_pixel_ptr + static_cast<int32_t>(xi) + static_cast<int32_t>(yi) * stride, stride, dx, dy);
}

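/** Sample a single-channel U8 image with area interpolation: the output pixel (x, y) is the average
 * of the input pixels covered by the scaled bounding box, clamped to the image borders.
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of the image.
 * @param[in] stride          Stride (in elements) between two consecutive rows.
 * @param[in] width           Width of the image.
 * @param[in] height          Height of the image.
 * @param[in] wr              Width scaling ratio used to map the output x coordinate into the input image (input width / output width).
 * @param[in] hr              Height scaling ratio used to map the output y coordinate into the input image (input height / output height).
 * @param[in] x               X coordinate of the output pixel.
 * @param[in] y               Y coordinate of the output pixel.
 *
 * @return The average of the pixels in the mapped area.
 */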
inline uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y)
{
    ARM_COMPUTE_ERROR_ON(first_pixel_ptr == nullptr);

    // Calculate sampling position
    float in_x = (x + 0.5f) * wr - 0.5f;
    float in_y = (y + 0.5f) * hr - 0.5f;

    // Get bounding box offsets
    int x_from = std::floor(x * wr - 0.5f - in_x);
    int y_from = std::floor(y * hr - 0.5f - in_y);
    int x_to   = std::ceil((x + 1) * wr - 0.5f - in_x);
    int y_to   = std::ceil((y + 1) * hr - 0.5f - in_y);

    // Clamp position to borders
    in_x = std::max(-1.f, std::min(in_x, static_cast<float>(width)));
    in_y = std::max(-1.f, std::min(in_y, static_cast<float>(height)));

    // Clamp bounding box offsets to borders
    x_from = ((in_x + x_from) < -1) ? -1 : x_from;
    y_from = ((in_y + y_from) < -1) ? -1 : y_from;
    x_to   = ((in_x + x_to) > width) ? (width - in_x) : x_to;
    y_to   = ((in_y + y_to) > height) ? (height - in_y) : y_to;

    // Get pixel index
    const int xi = std::floor(in_x);
    const int yi = std::floor(in_y);

    // Bounding box elements in each dimension
    const int x_elements = (x_to - x_from + 1);
    const int y_elements = (y_to - y_from + 1);
    ARM_COMPUTE_ERROR_ON(x_elements == 0 || y_elements == 0);

    // Sum pixels in area
    int sum = 0;
    for(int j = yi + y_from, je = yi + y_to; j <= je; ++j)
    {
        const uint8_t *ptr = first_pixel_ptr + j * stride + xi + x_from;
        sum                = std::accumulate(ptr, ptr + x_elements, sum);
    }

    // Return average
    return sum / (x_elements * y_elements);
}

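/** Helper which increments every iterator of a parameter pack along the given dimension,
 * unrolling the pack recursively at compile time.
 */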
template <size_t dimension>
struct IncrementIterators
{
    template <typename T, typename... Ts>
    static void unroll(T &&it, Ts &&... iterators)
    {
        it.increment(dimension);
        IncrementIterators<dimension>::unroll<Ts...>(std::forward<Ts>(iterators)...);
    }

    template <typename T>
    static void unroll(T &&it)
    {
        it.increment(dimension);
        // End of recursion
    }

    static void unroll()
    {
        // End of recursion
    }
};

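/** Compile-time recursion over the window dimensions: ForEachDimension<dim> loops dimension dim - 1
 * from start to end with the configured step, records the position in id, advances the iterators along
 * that dimension and recurses into the inner dimensions. The ForEachDimension<0> specialisation
 * terminates the recursion by calling the lambda function with the current coordinates.
 */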
template <size_t dim>
struct ForEachDimension
{
    template <typename L, typename... Ts>
    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&... iterators)
    {
        const auto &d = w[dim - 1];

        for(auto v = d.start(); v < d.end(); v += d.step(), IncrementIterators < dim - 1 >::unroll(iterators...))
        {
            id.set(dim - 1, v);
            ForEachDimension < dim - 1 >::unroll(w, id, lambda_function, iterators...);
        }
    }
};

template <>
struct ForEachDimension<0>
{
    template <typename L, typename... Ts>
    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&... iterators)
    {
        lambda_function(id);
    }
};

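/** Iterate through the passed window, calling lambda_function with the current Coordinates for each
 * element and adjusting the passed iterators along the way.
 *
 * Illustrative usage (the `input`, `output` and `window` names below are placeholders, not part of
 * this file), assuming a window already configured for both tensors:
 *
 *     Iterator in(input, window);
 *     Iterator out(output, window);
 *     execute_window_loop(window, [&](const Coordinates & id)
 *     {
 *         *out.ptr() = *in.ptr();
 *     },
 *     in, out);
 *
 * @param[in]     w               Window to iterate through.
 * @param[in]     lambda_function Callable invoked as lambda_function(const Coordinates &) at each iteration.
 * @param[in,out] iterators       Tensor iterators updated by this function before each call to lambda_function.
 */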
template <typename L, typename... Ts>
inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
{
    w.validate();

    Coordinates id;
    ForEachDimension<Coordinates::num_max_dimensions>::unroll(w, id, std::forward<L>(lambda_function), std::forward<Ts>(iterators)...);
}

inline constexpr Iterator::Iterator()
    : _ptr(nullptr), _dims()
{
}

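/** Create an iterator over @p tensor for the given window: the stride of each dimension is the window
 * step multiplied by the tensor stride in bytes, and the starting offset is the byte offset of the
 * window's first element.
 */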
inline Iterator::Iterator(const ITensor *tensor, const Window &win)
    : Iterator()
{
    ARM_COMPUTE_ERROR_ON(tensor == nullptr);
    const ITensorInfo *info = tensor->info();
    ARM_COMPUTE_ERROR_ON(info == nullptr);
    const Strides &strides = info->strides_in_bytes();

    _ptr = tensor->buffer() + info->offset_first_element_in_bytes();

    // Initialize the stride for each dimension and calculate the position of the first element of the iteration:
    for(unsigned int n = 0; n < info->num_dimensions(); ++n)
    {
        _dims[n]._stride = win[n].step() * strides[n];
        std::get<0>(_dims)._dim_start += strides[n] * win[n].start();
    }

    // Copy the starting point to all the dimensions:
    for(unsigned int n = 1; n < Coordinates::num_max_dimensions; ++n)
    {
        _dims[n]._dim_start = std::get<0>(_dims)._dim_start;
    }

    ARM_COMPUTE_ERROR_ON_WINDOW_DIMENSIONS_GTE(win, info->num_dimensions());
}

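/** Advance the iterator by one step along @p dimension and align all the lower dimensions to the new position. */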
inline void Iterator::increment(const size_t dimension)
{
    ARM_COMPUTE_ERROR_ON(dimension >= Coordinates::num_max_dimensions);

    _dims[dimension]._dim_start += _dims[dimension]._stride;

    for(unsigned int n = 0; n < dimension; ++n)
    {
        _dims[n]._dim_start = _dims[dimension]._dim_start;
    }
}

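/** Byte offset of the current element relative to the first element of the tensor; ptr() returns the
 * corresponding address inside the tensor buffer.
 */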
inline constexpr int Iterator::offset() const
{
    return _dims.at(0)._dim_start;
}

inline constexpr uint8_t *Iterator::ptr() const
{
    return _ptr + _dims.at(0)._dim_start;
}

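/** Move the iterator back to the beginning of @p dimension, taking the current position of the next
 * outer dimension as reference, and align all the lower dimensions to it.
 */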
inline void Iterator::reset(const size_t dimension)
{
    ARM_COMPUTE_ERROR_ON(dimension >= Coordinates::num_max_dimensions - 1);

    _dims[dimension]._dim_start = _dims[dimension + 1]._dim_start;

    for(unsigned int n = 0; n < dimension; ++n)
    {
        _dims[n]._dim_start = _dims[dimension]._dim_start;
    }
}

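/** Initialise @p info with the given shape, number of channels, data type and fixed point position,
 * but only if its tensor shape is still empty.
 *
 * @return True if the tensor info was initialised, false if it was left untouched.
 */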
inline bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, int fixed_point_position)
{
    if(info.tensor_shape().total_size() == 0)
    {
        info.set_data_type(data_type);
        info.set_tensor_shape(shape);
        info.set_num_channels(num_channels);
        info.set_fixed_point_position(fixed_point_position);
        return true;
    }

    return false;
}

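/* The following helpers share the same pattern: each sets a property of the tensor info only if it is
 * still unset (empty shape, unknown format or data type, or zero fixed point position for QS8/QS16)
 * and returns true when a change was made. */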
inline bool set_shape_if_empty(ITensorInfo &info, const TensorShape &shape)
{
    if(info.tensor_shape().total_size() == 0)
    {
        info.set_tensor_shape(shape);
        return true;
    }

    return false;
}

inline bool set_format_if_unknown(ITensorInfo &info, Format format)
{
    if(info.data_type() == DataType::UNKNOWN)
    {
        info.set_format(format);
        return true;
    }

    return false;
}

inline bool set_data_type_if_unknown(ITensorInfo &info, DataType data_type)
{
    if(info.data_type() == DataType::UNKNOWN)
    {
        info.set_data_type(data_type);
        return true;
    }

    return false;
}

inline bool set_fixed_point_position_if_zero(ITensorInfo &info, int fixed_point_position)
{
    if(info.fixed_point_position() == 0 && (info.data_type() == DataType::QS8 || info.data_type() == DataType::QS16))
    {
        info.set_fixed_point_position(fixed_point_position);
        return true;
    }

    return false;
}
} // namespace arm_compute