/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_HELPERS_H__
#define __ARM_COMPUTE_HELPERS_H__

#include "arm_compute/core/CL/CLTypes.h"
#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/Steps.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Window.h"
#include <algorithm> // for std::min / std::max used by clamp() and intersect_valid_regions()
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

namespace arm_compute
{
class IKernel;
class ITensor;
class ITensorInfo;

/** Implementation of std::make_unique for toolchains that lack the C++14 version (mirrors the N3656 reference implementation). */
namespace cpp14
{
template <class T>
struct _Unique_if
{
    typedef std::unique_ptr<T> _Single_object;
};

template <class T>
struct _Unique_if<T[]>
{
    typedef std::unique_ptr<T[]> _Unknown_bound;
};

template <class T, size_t N>
struct _Unique_if<T[N]>
{
    typedef void _Known_bound;
};

template <class T, class... Args>
typename _Unique_if<T>::_Single_object
make_unique(Args &&... args)
{
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

template <class T>
typename _Unique_if<T>::_Unknown_bound
make_unique(size_t n)
{
    typedef typename std::remove_extent<T>::type U;
    return std::unique_ptr<T>(new U[n]());
}

template <class T, class... Args>
typename _Unique_if<T>::_Known_bound
make_unique(Args &&...) = delete;
} // namespace cpp14
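
// Usage sketch for cpp14::make_unique (illustrative only; "NEGaussian3x3Kernel"
// stands in for any concrete kernel type and is an assumption, not something
// this header prescribes):
//
//   auto kernel  = arm_compute::cpp14::make_unique<NEGaussian3x3Kernel>();
//   auto scratch = arm_compute::cpp14::make_unique<uint8_t[]>(buffer_size);
//
// The unbounded-array overload value-initialises its elements, and the
// bounded-array overload (T[N]) is deleted, mirroring std::make_unique.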

/** Trait that is false by default; specialise it to true for an enum type to enable the SFINAE-guarded bitwise operator& defined below. */
template <typename T>
struct enable_bitwise_ops
{
    static constexpr bool value = false;
};

template <typename T>
typename std::enable_if<enable_bitwise_ops<T>::value, T>::type operator&(T lhs, T rhs)
{
    using underlying_type = typename std::underlying_type<T>::type;
    return static_cast<T>(static_cast<underlying_type>(lhs) & static_cast<underlying_type>(rhs));
}
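
// Illustrative opt-in for the operator& above: specialise enable_bitwise_ops
// for an enum class inside namespace arm_compute. "MyFlags" is a hypothetical
// type used only for this sketch.
//
//   enum class MyFlags : uint32_t
//   {
//       NONE = 0,
//       A    = 1,
//       B    = 2
//   };
//
//   template <>
//   struct enable_bitwise_ops<MyFlags>
//   {
//       static constexpr bool value = true;
//   };
//
//   MyFlags f = MyFlags::A & MyFlags::B; // resolves to the SFINAE-guarded operator&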

namespace traits
{
/** Check if a type T is contained in a tuple Tuple of types */
template <typename T, typename Tuple>
struct is_contained;

template <typename T>
struct is_contained<T, std::tuple<>> : std::false_type
{
};

template <typename T, typename... Ts>
struct is_contained<T, std::tuple<T, Ts...>> : std::true_type
{
};

template <typename T, typename U, typename... Ts>
struct is_contained<T, std::tuple<U, Ts...>> : is_contained<T, std::tuple<Ts...>>
{
};
} // namespace traits
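
// Compile-time behaviour of traits::is_contained (illustrative checks):
//
//   static_assert(traits::is_contained<int, std::tuple<float, int, char>>::value,
//                 "int appears in the tuple");
//   static_assert(!traits::is_contained<double, std::tuple<float, int, char>>::value,
//                 "double does not appear in the tuple");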

/** Computes bilinear interpolation using the pointer to the top-left pixel and the fractional distances
 * between the real coordinates and the top-left pixel's integer coordinates.
 *
 * @param[in] pixel_ptr Pointer to the top-left pixel value. Format: Single channel U8
 * @param[in] stride    Stride to access the bottom-left and bottom-right pixel values
 * @param[in] dx        Distance between the real X coordinate and the X coordinate of the top-left pixel
 * @param[in] dy        Distance between the real Y coordinate and the Y coordinate of the top-left pixel
 *
 * @note dx and dy must be in the range [0, 1.0]
 *
 * @return The bilinear interpolated pixel value
 */
inline uint8_t delta_bilinear_c1u8(const uint8_t *pixel_ptr, size_t stride, float dx, float dy);
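
// Sketch of the computation (standard bilinear weighting; tl, tr, bl and br are
// the four neighbouring pixels reachable from pixel_ptr and stride):
//
//   out = tl * (1 - dx) * (1 - dy)
//       + tr * dx       * (1 - dy)
//       + bl * (1 - dx) * dy
//       + br * dx       * dy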

/** Return the pixel at (x,y) using bilinear interpolation. The image must be single channel U8
 *
 * @warning Only works if the iterator was created with an IImage
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image.
 * @param[in] stride          Stride in bytes of the image
 * @param[in] x               X position of the wanted pixel
 * @param[in] y               Y position of the wanted pixel
 *
 * @return The pixel at (x, y) using bilinear interpolation.
 */
inline uint8_t pixel_bilinear_c1u8(const uint8_t *first_pixel_ptr, size_t stride, float x, float y);

/** Return the pixel at (x,y) using bilinear interpolation by clamping when out of borders. The image must be single channel U8
 *
 * @warning Only works if the iterator was created with an IImage
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image.
 * @param[in] stride          Stride in bytes of the image
 * @param[in] width           Width of the image
 * @param[in] height          Height of the image
 * @param[in] x               X position of the wanted pixel
 * @param[in] y               Y position of the wanted pixel
 *
 * @return The pixel at (x, y) using bilinear interpolation.
 */
inline uint8_t pixel_bilinear_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float x, float y);

/** Return the pixel at (x,y) using area interpolation by clamping when out of borders. The image must be single channel U8
 *
 * @note The interpolation area depends on the width and height ratio of the input and output images
 * @note Currently the average of the contributing pixels is calculated
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image.
 * @param[in] stride          Stride in bytes of the image
 * @param[in] width           Width of the image
 * @param[in] height          Height of the image
 * @param[in] wr              Width ratio between the input image width and the output image width.
 * @param[in] hr              Height ratio between the input image height and the output image height.
 * @param[in] x               X position of the wanted pixel
 * @param[in] y               Y position of the wanted pixel
 *
 * @return The pixel at (x, y) using area interpolation.
 */
inline uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y);

/** Performs clamping between a lower and an upper value.
 *
 * @param[in] n     Value to clamp.
 * @param[in] lower Lower threshold.
 * @param[in] upper Upper threshold.
 *
 * @return Clamped value.
 */
template <typename T>
inline T clamp(const T &n, const T &lower, const T &upper)
{
    return std::max(lower, std::min(n, upper));
}

/** Base case of for_each. Does nothing. */
template <typename F>
inline void for_each(F &&)
{
}

/** Call the function for each of the arguments
 *
 * @param[in] func Function to be called
 * @param[in] arg  Argument passed to the function
 * @param[in] args Remaining arguments
 */
template <typename F, typename T, typename... Ts>
inline void for_each(F &&func, T &&arg, Ts &&... args)
{
    func(arg);
    for_each(func, args...);
}
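
// Illustrative use of for_each: apply a callable to every element of a
// parameter pack.
//
//   int sum = 0;
//   for_each([&sum](int v) { sum += v; }, 1, 2, 3); // sum == 6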

/** Base case of foldl.
 *
 * @return value.
 */
template <typename F, typename T>
inline T foldl(F &&, const T &value)
{
    return value;
}

/** Base case of foldl with two values.
 *
 * @return Function evaluation for value1 and value2
 */
template <typename F, typename T, typename U>
inline auto foldl(F &&func, T &&value1, U &&value2) -> decltype(func(value1, value2))
{
    return func(value1, value2);
}

/** Fold left.
 *
 * @param[in] func    Function to be called
 * @param[in] initial Initial value
 * @param[in] value   Argument passed to the function
 * @param[in] values  Remaining arguments
 *
 * @return The left fold of func over initial and all remaining values.
 */
template <typename F, typename I, typename T, typename... Vs>
inline I foldl(F &&func, I &&initial, T &&value, Vs &&... values)
{
    return foldl(std::forward<F>(func), func(std::forward<I>(initial), std::forward<T>(value)), std::forward<Vs>(values)...);
}
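
// Illustrative use of foldl: left fold of a binary callable over a list of
// values, i.e. foldl(f, a, b, c) == f(f(a, b), c).
//
//   const int total = foldl([](int a, int b) { return a + b; }, 0, 1, 2, 3); // total == 6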

/** Iterator updated by @ref execute_window_loop for each window element */
class Iterator
{
public:
    /** Default constructor to create an empty iterator */
    constexpr Iterator();
    /** Create a container iterator for the metadata and allocation contained in the ITensor
     *
     * @param[in] tensor The tensor to associate to the iterator.
     * @param[in] window The window which will be used to iterate over the tensor.
     */
    Iterator(const ITensor *tensor, const Window &window);

    /** Increment the iterator along the specified dimension by the step value associated with that dimension.
     *
     * @warning It is the caller's responsibility to call increment(dimension + 1) when reaching the end of a dimension; the iterator will not check for overflow.
     *
     * @note When incrementing dimension 'n', the coordinates of all the dimensions in the range (0, n-1) are reset. For example, if you iterate over a 2D image, every time you change row (dimension 1), the iterator for the width (dimension 0) is reset to its start.
     *
     * @param[in] dimension Dimension to increment
     */
    void increment(size_t dimension);

    /** Return the offset in bytes from the first element to the current position of the iterator
     *
     * @return The current position of the iterator in bytes relative to the first element.
     */
    constexpr int offset() const;

    /** Return a pointer to the current pixel.
     *
     * @warning Only works if the iterator was created with an ITensor.
     *
     * @return equivalent to buffer() + offset()
     */
    constexpr uint8_t *ptr() const;

    /** Move the iterator back to the beginning of the specified dimension.
     *
     * @param[in] dimension Dimension to reset
     */
    void reset(size_t dimension);

private:
    uint8_t *_ptr;

    class Dimension
    {
    public:
        constexpr Dimension()
            : _dim_start(0), _stride(0)
        {
        }

        int _dim_start;
        int _stride;
    };

    std::array<Dimension, Coordinates::num_max_dimensions> _dims;
};

/** Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
 * The absolute coordinates of each element are passed to the lambda_function on every iteration.
 *
 * @param[in]     w               Window to iterate through.
 * @param[in]     lambda_function The function of type void(function)( const Coordinates & id ) to call at each iteration.
 *                                Where id represents the absolute coordinates of the item to process.
 * @param[in,out] iterators       Tensor iterators which will be updated by this function before calling lambda_function.
 */
template <typename L, typename... Ts>
inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators);
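
// Typical usage sketch (the ITensor pointers "input" and "output" are
// assumptions for illustration; this mirrors how kernels usually drive the
// loop but is not prescribed by this header):
//
//   Window   win = calculate_max_window(*input->info());
//   Iterator in(input, win);
//   Iterator out(output, win);
//
//   execute_window_loop(win, [&](const Coordinates & id)
//   {
//       *out.ptr() = *in.ptr(); // copy one U8 element per iteration
//   },
//   in, out);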

/** Update window and padding size for each of the access patterns.
 *
 * First the window size is reduced based on all access patterns that are not
 * allowed to modify the padding of the underlying tensor. Then the padding of
 * the remaining tensors is increased to match the window.
 *
 * @param[in,out] win      Window that is used by the kernel.
 * @param[in]     patterns Access patterns used to calculate the final window and padding.
 *
 * @return True if the window has been changed. Changes to the padding do not
 *         influence the returned value.
 */
template <typename... Ts>
bool update_window_and_padding(Window &win, Ts &&... patterns)
{
    bool window_changed = false;

    for_each([&](const IAccessWindow & w)
    {
        window_changed |= w.update_window_if_needed(win);
    },
    patterns...);

    bool padding_changed = false;

    for_each([&](const IAccessWindow & w)
    {
        padding_changed |= w.update_padding_if_needed(win);
    },
    patterns...);

    return window_changed;
}
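
// Sketch of the usual configure()-time pattern (AccessWindowHorizontal comes
// from IAccessWindow.h; the tensor pointers and the value of
// num_elems_processed_per_iteration are assumptions for illustration):
//
//   constexpr unsigned int num_elems_processed_per_iteration = 16;
//
//   Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
//
//   AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
//   AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
//
//   update_window_and_padding(win, input_access, output_access);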

/** Calculate the maximum window for a given tensor shape and border setting
 *
 * @param[in] info        Tensor info object defining the shape of the object for which the window is created.
 * @param[in] steps       (Optional) Number of elements processed for each step.
 * @param[in] skip_border (Optional) If true, exclude the border region from the window.
 * @param[in] border_size (Optional) Border size.
 *
 * @return The maximum window the kernel can be executed on.
 */
Window calculate_max_window(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());

/** Calculate the maximum window used by a horizontal kernel for a given tensor shape and border setting
 *
 * @param[in] info        Tensor info object defining the shape of the object for which the window is created.
 * @param[in] steps       (Optional) Number of elements processed for each step.
 * @param[in] skip_border (Optional) If true, exclude the border region from the window.
 * @param[in] border_size (Optional) Border size. The border region will be excluded from the window.
 *
 * @return The maximum window the kernel can be executed on.
 */
Window calculate_max_window_horizontal(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());

/** Calculate the maximum window for a given tensor shape and border setting. The window will also include the border.
 *
 * @param[in] info        Tensor info object defining the shape of the object for which the window is created.
 * @param[in] steps       (Optional) Number of elements processed for each step.
 * @param[in] border_size (Optional) Border size. The border region will be included in the window.
 *
 * @return The maximum window the kernel can be executed on.
 */
Window calculate_max_enlarged_window(const ITensorInfo &info, const Steps &steps = Steps(), BorderSize border_size = BorderSize());

/** Intersect multiple valid regions.
 *
 * @param[in] regions Valid regions.
 *
 * @return Intersection of all regions.
 */
template <typename... Ts>
ValidRegion intersect_valid_regions(Ts &&... regions)
{
    auto intersect = [](const ValidRegion & r1, const ValidRegion & r2) -> ValidRegion
    {
        ValidRegion region;

        for(size_t d = 0; d < std::min(r1.anchor.num_dimensions(), r2.anchor.num_dimensions()); ++d)
        {
            region.anchor.set(d, std::max(r1.anchor[d], r2.anchor[d]));
        }

        for(size_t d = 0; d < std::min(r1.shape.num_dimensions(), r2.shape.num_dimensions()); ++d)
        {
            region.shape.set(d, std::min(r1.shape[d], r2.shape[d]));
        }

        return region;
    };

    return foldl(intersect, std::forward<Ts>(regions)...);
}

/** Create a strides object based on the provided strides and the tensor dimensions.
 *
 * @param[in] info          Tensor info object providing the shape of the tensor for unspecified strides.
 * @param[in] stride_x      Stride to be used in X dimension (in bytes).
 * @param[in] fixed_strides Strides to be used in higher dimensions starting at Y (in bytes).
 *
 * @return Strides object based on the specified strides. Missing strides are
 *         calculated based on the tensor shape and the strides of lower dimensions.
 */
template <typename T, typename... Ts>
inline Strides compute_strides(const ITensorInfo &info, T stride_x, Ts &&... fixed_strides)
{
    const TensorShape &shape = info.tensor_shape();

    // Create strides object
    Strides strides(stride_x, fixed_strides...);

    for(size_t i = 1 + sizeof...(Ts); i < info.num_dimensions(); ++i)
    {
        strides.set(i, shape[i - 1] * strides[i - 1]);
    }

    return strides;
}
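
// Worked example (illustrative): for a single channel U8 tensor of shape
// (64, 32, 4) with an element size of 1 byte, compute_strides(info, 1)
// returns strides of (1, 64, 64 * 32) bytes for dimensions X, Y and Z,
// since every missing stride is the previous stride multiplied by the
// previous dimension of the shape.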

/** Create a strides object based on the tensor dimensions.
 *
 * @param[in] info Tensor info object used to compute the strides.
 *
 * @return Strides object based on element size and tensor shape.
 */
template <typename... Ts>
inline Strides compute_strides(const ITensorInfo &info)
{
    return compute_strides(info, info.element_size());
}

/** Auto initialize the tensor info (shape, number of channels, data type and fixed point position) if the current assignment is empty.
 *
 * @param[in,out] info                 Tensor info used to check and assign.
 * @param[in]     shape                New shape.
 * @param[in]     num_channels         New number of channels.
 * @param[in]     data_type            New data type.
 * @param[in]     fixed_point_position New fixed point position.
 *
 * @return True if the tensor info has been initialized.
 */
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, int fixed_point_position);
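
// Typical configure()-time usage sketch (the "input" and "output" ITensor
// pointers are assumptions for illustration; the output only picks up the
// input's metadata if its own info is still empty):
//
//   auto_init_if_empty(*output->info(),
//                      input->info()->tensor_shape(),
//                      1,
//                      input->info()->data_type(),
//                      input->info()->fixed_point_position());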

/** Set the shape to the specified value if the current assignment is empty.
 *
 * @param[in,out] info  Tensor info used to check and assign.
 * @param[in]     shape New shape.
 *
 * @return True if the shape has been changed.
 */
bool set_shape_if_empty(ITensorInfo &info, const TensorShape &shape);

/** Set the format, data type and number of channels to the specified value if
 * the current data type is unknown.
 *
 * @param[in,out] info   Tensor info used to check and assign.
 * @param[in]     format New format.
 *
 * @return True if the format has been changed.
 */
bool set_format_if_unknown(ITensorInfo &info, Format format);

/** Set the data type and number of channels to the specified value if
 * the current data type is unknown.
 *
 * @param[in,out] info      Tensor info used to check and assign.
 * @param[in]     data_type New data type.
 *
 * @return True if the data type has been changed.
 */
bool set_data_type_if_unknown(ITensorInfo &info, DataType data_type);

/** Set the fixed point position to the specified value if
 * the current fixed point position is 0 and the data type is QS8 or QS16.
 *
 * @param[in,out] info                 Tensor info used to check and assign.
 * @param[in]     fixed_point_position New fixed point position.
 *
 * @return True if the fixed point position has been changed.
 */
bool set_fixed_point_position_if_zero(ITensorInfo &info, int fixed_point_position);
} // namespace arm_compute

#include "arm_compute/core/Helpers.inl"
#endif /* __ARM_COMPUTE_HELPERS_H__ */