/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TYPES_H__
#define __ARM_COMPUTE_TYPES_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/QAsymm8.h"
#include "arm_compute/core/Rounding.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "support/Half.h"

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>

namespace arm_compute
{
/** 16-bit floating point type */
using half = half_float::half;

/** Permutation vector */
using PermutationVector = Strides;
/** Bidirectional strides */
using BiStrides = Coordinates;

/** Image colour formats */
enum class Format
{
    UNKNOWN,  /**< Unknown image format */
    U8,       /**< 1 channel, 1 U8 per channel */
    S16,      /**< 1 channel, 1 S16 per channel */
    U16,      /**< 1 channel, 1 U16 per channel */
    S32,      /**< 1 channel, 1 S32 per channel */
    U32,      /**< 1 channel, 1 U32 per channel */
    F16,      /**< 1 channel, 1 F16 per channel */
    F32,      /**< 1 channel, 1 F32 per channel */
    UV88,     /**< 2 channels, 1 U8 per channel */
    RGB888,   /**< 3 channels, 1 U8 per channel */
    RGBA8888, /**< 4 channels, 1 U8 per channel */
    YUV444,   /**< A 3 plane of 8 bit 4:4:4 sampled Y, U, V planes */
    YUYV422,  /**< A single plane of 32-bit macro pixel of Y0, U0, Y1, V0 bytes */
    NV12,     /**< A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling */
    NV21,     /**< A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling */
    IYUV,     /**< A 3 plane of 8-bit 4:2:0 sampled Y, U, V planes */
    UYVY422   /**< A single plane of 32-bit macro pixel of U0, Y0, V0, Y1 bytes */
};

/** Available data types */
enum class DataType
{
    UNKNOWN, /**< Unknown data type */
    U8,      /**< unsigned 8-bit number */
    S8,      /**< signed 8-bit number */
    QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number */
    U16,     /**< unsigned 16-bit number */
    S16,     /**< signed 16-bit number */
    U32,     /**< unsigned 32-bit number */
    S32,     /**< signed 32-bit number */
    U64,     /**< unsigned 64-bit number */
    S64,     /**< signed 64-bit number */
    F16,     /**< 16-bit floating-point number */
    F32,     /**< 32-bit floating-point number */
    F64,     /**< 64-bit floating-point number */
    SIZET    /**< size_t */
};

/** Available Sampling Policies */
enum class SamplingPolicy
{
    CENTER,  /**< Samples are taken at pixel center */
    TOP_LEFT /**< Samples are taken at pixel top left corner */
};

/** Constant value of the border pixels when using BorderMode::CONSTANT */
constexpr uint8_t CONSTANT_BORDER_VALUE = 199;

/** Constant value used to indicate a half-scale pyramid */
constexpr float SCALE_PYRAMID_HALF = 0.5f;

/** Constant value used to indicate an ORB scaled pyramid */
constexpr float SCALE_PYRAMID_ORB = 8.408964152537146130583778358414e-01;

/** Supported tensor data layouts */
enum class DataLayout
{
    UNKNOWN, /**< Unknown data layout */
    NCHW,    /**< Num samples, channels, height, width */
    NHWC     /**< Num samples, height, width, channels */
};

/** Supported tensor data layout dimensions */
enum class DataLayoutDimension
{
    CHANNEL, /**< channel */
    HEIGHT,  /**< height */
    WIDTH,   /**< width */
    BATCHES  /**< batches */
};

/** Quantization settings (used for QASYMM8 data type) */
struct QuantizationInfo
{
    /** Default constructor */
    QuantizationInfo() noexcept
        : scale(0.0f),
          offset(0)
    {
    }

    /** Construct quantization info.
     *
     * @param[in] scale  Scale.
     * @param[in] offset Offset.
     */
    QuantizationInfo(float scale, int offset)
        : scale(scale), offset(offset)
    {
    }

    /** Check whether equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is the same.
     */
    bool operator==(const QuantizationInfo &other) const
    {
        return scale == other.scale && offset == other.offset;
    }

    /** Check whether not equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is not the same.
     */
    bool operator!=(const QuantizationInfo &other) const
    {
        return !(*this == other);
    }

    float scale;  /**< scale */
    int   offset; /**< offset */

    /** Quantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value           Value to quantize.
     * @param[in] rounding_policy Policy to use when rounding.
     *
     * @return the quantized value.
     */
    qasymm8_t quantize(float value, RoundingPolicy rounding_policy) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::quantize: scale == 0");
        return sqcvt_qasymm8_f32(value, scale, offset, rounding_policy);
    }

    /** Dequantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value Value to dequantize.
     *
     * @return the original value before quantization.
     */
    float dequantize(qasymm8_t value) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::dequantize: scale == 0");
        return scvt_f32_qasymm8(value, scale, offset);
    }

    /** Indicates whether this QuantizationInfo has valid settings or not
     *
     * @return True if this has invalid settings.
     */
    bool empty() const
    {
        return scale == 0;
    }
};
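
/* Usage sketch (illustrative, not part of the API): quantize/dequantize round-trip using
 * hypothetical scale/offset values and the RoundingPolicy defined in Rounding.h.
 *
 *   QuantizationInfo qinfo(0.1f, 128);
 *   qasymm8_t q = qinfo.quantize(1.5f, RoundingPolicy::TO_NEAREST_UP); // 128 + round(1.5 / 0.1) = 143
 *   float     f = qinfo.dequantize(q);                                 // (143 - 128) * 0.1 = 1.5
 */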

/** Container for valid region of a window */
struct ValidRegion
{
    /** Default constructor */
    ValidRegion()
        : anchor{}, shape{}
    {
    }

    /** Allow instances of this class to be copy constructed */
    ValidRegion(const ValidRegion &) = default;
    /** Allow instances of this class to be move constructed */
    ValidRegion(ValidRegion &&) = default;
    /** Allow instances of this class to be copied */
    ValidRegion &operator=(const ValidRegion &) = default;
    /** Allow instances of this class to be moved */
    ValidRegion &operator=(ValidRegion &&) = default;
    /** Default destructor */
    ~ValidRegion() = default;

    /** Constructor for a valid region with default number of dimensions
     *
     * @param[in] an_anchor Anchor for the start of the valid region.
     * @param[in] a_shape   Shape of the valid region.
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
    }

    /** Constructor for a valid region with specified number of dimensions
     *
     * @param[in] an_anchor      Anchor for the start of the valid region.
     * @param[in] a_shape        Shape of the valid region.
     * @param[in] num_dimensions Number of dimensions (must be >= number of dimensions of anchor and shape).
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
        anchor.set_num_dimensions(num_dimensions);
    }

    /** Return the start of the valid region for the given dimension @p d */
    int start(unsigned int d) const
    {
        return anchor[d];
    }

    /** Return the end of the valid region for the given dimension @p d */
    int end(unsigned int d) const
    {
        return anchor[d] + shape[d];
    }

    /** Accessor to set the value of anchor and shape for one of the dimensions.
     *
     * @param[in] dimension Dimension for which the value is set.
     * @param[in] start     Value to be set in anchor for the dimension.
     * @param[in] size      Value to be set in shape for the dimension.
     *
     * @return *this.
     */
    ValidRegion &set(size_t dimension, int start, size_t size)
    {
        anchor.set(dimension, start);
        shape.set(dimension, size);
        return *this;
    }

    Coordinates anchor; /**< Anchor for the start of the valid region. */
    TensorShape shape;  /**< Shape of the valid region. */
};
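
/* Usage sketch (illustrative): a valid region starting at (1, 1) covering a 6x6 area,
 * e.g. the interior of an 8x8 tensor after an undefined 1-pixel border.
 *
 *   ValidRegion region(Coordinates(1, 1), TensorShape(6U, 6U));
 *   int x_start = region.start(0); // 1
 *   int x_end   = region.end(0);   // 1 + 6 = 7
 */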

/** Methods available to handle borders */
enum class BorderMode
{
    UNDEFINED, /**< Borders are left undefined */
    CONSTANT,  /**< Pixels outside the image are assumed to have a constant value */
    REPLICATE  /**< Pixels outside the image are assumed to have the same value as the closest image pixel */
};

/** Container for 2D border size */
struct BorderSize
{
    /** Empty border, i.e. no border */
    constexpr BorderSize()
        : top{ 0 }, right{ 0 }, bottom{ 0 }, left{ 0 }
    {
    }

    /** Border with equal size around the 2D plane */
    explicit constexpr BorderSize(unsigned int size)
        : top{ size }, right{ size }, bottom{ size }, left{ size }
    {
    }

    /** Border with same size for top/bottom and left/right */
    constexpr BorderSize(unsigned int top_bottom, unsigned int left_right)
        : top{ top_bottom }, right{ left_right }, bottom{ top_bottom }, left{ left_right }
    {
    }

    /** Border with different sizes */
    constexpr BorderSize(unsigned int top, unsigned int right, unsigned int bottom, unsigned int left)
        : top{ top }, right{ right }, bottom{ bottom }, left{ left }
    {
    }

    /** Check if the entire border is zero */
    constexpr bool empty() const
    {
        return top == 0 && right == 0 && bottom == 0 && left == 0;
    }

    /** Check if the border is the same size on all sides */
    constexpr bool uniform() const
    {
        return top == right && top == bottom && top == left;
    }

    /** Scale this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return *this.
     */
    BorderSize &operator*=(float scale)
    {
        top *= scale;
        right *= scale;
        bottom *= scale;
        left *= scale;

        return *this;
    }

    /** Scale a copy of this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return a scaled copy of this.
     */
    BorderSize operator*(float scale)
    {
        BorderSize size = *this;
        size *= scale;

        return size;
    }

    /** Limit this border size.
     *
     * @param[in] limit Border size to limit this border size to.
     */
    void limit(const BorderSize &limit)
    {
        top    = std::min(top, limit.top);
        right  = std::min(right, limit.right);
        bottom = std::min(bottom, limit.bottom);
        left   = std::min(left, limit.left);
    }

    unsigned int top;    /**< top of the border */
    unsigned int right;  /**< right of the border */
    unsigned int bottom; /**< bottom of the border */
    unsigned int left;   /**< left of the border */
};
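
/* Usage sketch (illustrative): a uniform 2-pixel border clamped against a smaller limit.
 *
 *   BorderSize border(2);                 // top = right = bottom = left = 2
 *   border.limit(BorderSize(1, 2, 1, 2)); // top/bottom become 1, right/left stay 2
 *   bool same = border.uniform();         // false
 */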

/** Container for 2D padding size */
using PaddingSize = BorderSize;

/** Policy to handle overflow */
enum class ConvertPolicy
{
    WRAP,    /**< Wrap around */
    SATURATE /**< Saturate */
};

/** Interpolation method */
enum class InterpolationPolicy
{
    NEAREST_NEIGHBOR, /**< Output values are defined to match the source pixel whose center is nearest to the sample position */
    BILINEAR,         /**< Output values are defined by bilinear interpolation between the pixels */
    AREA,             /**< Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image */
};

/** Bilinear Interpolation method used by LKTracker */
enum class BilinearInterpolation
{
    BILINEAR_OLD_NEW, /**< Old-new method */
    BILINEAR_SCHARR   /**< Scharr method */
};

/** Threshold mode */
enum class ThresholdType
{
    BINARY, /**< Threshold with one value */
    RANGE   /**< Threshold with two values */
};

/** Termination criteria */
enum class Termination
{
    TERM_CRITERIA_EPSILON,    /**< Terminate when within epsilon of a threshold */
    TERM_CRITERIA_ITERATIONS, /**< Terminate after a maximum number of iterations */
    TERM_CRITERIA_BOTH        /**< Terminate on whichever of the other conditions occurs first */
};

/** Magnitude calculation type. */
enum class MagnitudeType
{
    L1NORM, /**< L1 normalization type */
    L2NORM  /**< L2 normalization type */
};

/** Phase calculation type.
 *
 * @note When PhaseType == SIGNED, each angle in [0, 360] is mapped to the range 0 to 255 inclusive; otherwise angles in [0, 180] are mapped.
 */
enum class PhaseType
{
    SIGNED,  /**< Angle range: [0, 360] */
    UNSIGNED /**< Angle range: [0, 180] */
};

/** Keypoint type */
struct KeyPoint
{
    int32_t x{ 0 };               /**< X coordinates */
    int32_t y{ 0 };               /**< Y coordinates */
    float   strength{ 0.f };      /**< Strength of the point */
    float   scale{ 0.f };         /**< Scale initialized to 0 by the corner detector */
    float   orientation{ 0.f };   /**< Orientation initialized to 0 by the corner detector */
    int32_t tracking_status{ 0 }; /**< Status initialized to 1 by the corner detector, set to 0 when the point is lost */
    float   error{ 0.f };         /**< Tracking error initialized to 0 by the corner detector */
};

/** Internal key point */
using InternalKeypoint = std::tuple<float, float, float>; /* x,y,strength */

/** Rectangle type */
struct Rectangle
{
    uint16_t x;      /**< Top-left x coordinate */
    uint16_t y;      /**< Top-left y coordinate */
    uint16_t width;  /**< Width of the rectangle */
    uint16_t height; /**< Height of the rectangle */
};

/** Coordinate type */
struct Coordinates2D
{
    int32_t x; /**< X coordinates */
    int32_t y; /**< Y coordinates */
};

/** Coordinate type */
struct Coordinates3D
{
    uint32_t x; /**< X coordinates */
    uint32_t y; /**< Y coordinates */
    uint32_t z; /**< Z coordinates */
};

/** Padding information as a pair of unsigned int start/end */
using PaddingInfo = std::pair<uint32_t, uint32_t>;

/** List of padding information */
using PaddingList = std::vector<PaddingInfo>;

/** Region of interest */
struct ROI
{
    Rectangle rect;      /**< Rectangle specifying the region of interest */
    uint16_t  batch_idx; /**< The batch index of the region of interest */
};

/** Available channels */
enum class Channel
{
    UNKNOWN, /**< Unknown channel format */
    C0,      /**< First channel (used by formats with unknown channel types). */
    C1,      /**< Second channel (used by formats with unknown channel types). */
    C2,      /**< Third channel (used by formats with unknown channel types). */
    C3,      /**< Fourth channel (used by formats with unknown channel types). */
    R,       /**< Red channel. */
    G,       /**< Green channel. */
    B,       /**< Blue channel. */
    A,       /**< Alpha channel. */
    Y,       /**< Luma channel. */
    U,       /**< Cb/U channel. */
    V        /**< Cr/V/Value channel. */
};

/** Available matrix patterns */
enum class MatrixPattern
{
    BOX,   /**< Box pattern matrix. */
    CROSS, /**< Cross pattern matrix. */
    DISK,  /**< Disk pattern matrix. */
    OTHER  /**< Any other matrix pattern. */
};

/** Available non linear functions. */
enum class NonLinearFilterFunction : unsigned
{
    MEDIAN = 0, /**< Non linear median filter. */
    MIN    = 1, /**< Non linear erode. */
    MAX    = 2, /**< Non linear dilate. */
};

/** Available reduction operations */
enum class ReductionOperation
{
    SUM_SQUARE, /**< Sum of squares */
    SUM,        /**< Sum */
    MEAN_SUM,   /**< Mean of sum */
};

/** The normalization type used for the normalization layer */
enum class NormType
{
    IN_MAP_1D, /**< Normalization applied within the same map in 1D region */
    IN_MAP_2D, /**< Normalization applied within the same map in 2D region */
    CROSS_MAP  /**< Normalization applied cross maps */
};

/** Normalization type for Histogram of Oriented Gradients (HOG) */
enum class HOGNormType
{
    L2_NORM    = 1, /**< L2-norm */
    L2HYS_NORM = 2, /**< L2-norm followed by clipping */
    L1_NORM    = 3  /**< L1 norm */
};

/** Detection window used for the object detection. The detection window keeps the following information:
 *
 * -# Geometry of the rectangular window (x/y of top-left corner and width/height)
 * -# Index of the class used for evaluating which class the detection window belongs to
 * -# Confidence value (score) obtained with the classifier
 */
struct DetectionWindow
{
    uint16_t x{ 0 };         /**< Top-left x coordinate */
    uint16_t y{ 0 };         /**< Top-left y coordinate */
    uint16_t width{ 0 };     /**< Width of the detection window */
    uint16_t height{ 0 };    /**< Height of the detection window */
    uint16_t idx_class{ 0 }; /**< Index of the class */
    float    score{ 0.f };   /**< Confidence value for the detection window */
};

/** Dimension rounding type when down-scaling on CNNs
 * @note Used in pooling and convolution layer
 */
enum class DimensionRoundingType
{
    FLOOR, /**< Floor rounding */
    CEIL   /**< Ceil rounding */
};

/** Available pooling types */
enum class PoolingType
{
    MAX, /**< Max Pooling */
    AVG, /**< Average Pooling */
    L2   /**< L2 Pooling */
};

/** Available non maxima suppression types */
enum class NMSType
{
    LINEAR,   /**< Linear NMS */
    GAUSSIAN, /**< Gaussian NMS */
    ORIGINAL  /**< Original NMS */
};

/** BoxWithNonMaximaSuppressionLimit Information class */
class BoxNMSLimitInfo final
{
public:
    /** Constructor
     *
     * @param[in] score_thresh             (Optional) Score threshold.
     * @param[in] nms                      (Optional) NMS value
     * @param[in] detections               (Optional) Number of detections
     * @param[in] soft_nms_enabled         (Optional) Enable SoftNMS
     * @param[in] soft_nms_method          (Optional) Soft NMS method
     * @param[in] soft_nms_sigma           (Optional) Soft NMS sigma value
     * @param[in] soft_nms_min_score_thres (Optional) Soft NMS minimum score threshold
     */
    BoxNMSLimitInfo(float score_thresh = 0.05f, float nms = 0.3f,
                    int detections = 100, bool soft_nms_enabled = false,
                    NMSType soft_nms_method = NMSType::LINEAR,
                    float soft_nms_sigma = 0.5f, float soft_nms_min_score_thres = 0.001f)
        : _score_thresh(score_thresh), _nms(nms), _detections_per_im(detections), _soft_nms_enabled(soft_nms_enabled), _soft_nms_method(soft_nms_method), _soft_nms_sigma(soft_nms_sigma),
          _soft_nms_min_score_thres(soft_nms_min_score_thres)
    {
    }
    /** Get the score threshold */
    float score_thresh() const
    {
        return _score_thresh;
    }
    /** Get the NMS */
    float nms() const
    {
        return _nms;
    }
    /** Get the number of detections */
    int detections_per_im() const
    {
        return _detections_per_im;
    }
    /** Check if soft NMS is enabled */
    bool soft_nms_enabled() const
    {
        return _soft_nms_enabled;
    }
    /** Get soft NMS method */
    NMSType soft_nms_method() const
    {
        return _soft_nms_method;
    }
    /** Get soft NMS sigma */
    float soft_nms_sigma() const
    {
        return _soft_nms_sigma;
    }
    /** Get soft NMS min score threshold */
    float soft_nms_min_score_thres() const
    {
        return _soft_nms_min_score_thres;
    }

private:
    float   _score_thresh;
    float   _nms;
    int     _detections_per_im;
    bool    _soft_nms_enabled;
    NMSType _soft_nms_method;
    float   _soft_nms_sigma;
    float   _soft_nms_min_score_thres;
};

/** Padding and stride information class */
class PadStrideInfo
{
public:
    /** Constructor
     *
     * @param[in] stride_x (Optional) Stride, in elements, across x. Defaults to 1.
     * @param[in] stride_y (Optional) Stride, in elements, across y. Defaults to 1.
     * @param[in] pad_x    (Optional) Padding, in elements, across x. Defaults to 0.
     * @param[in] pad_y    (Optional) Padding, in elements, across y. Defaults to 0.
     * @param[in] round    (Optional) Dimensions rounding. Defaults to @ref DimensionRoundingType::FLOOR.
     */
    PadStrideInfo(unsigned int stride_x = 1, unsigned int stride_y = 1,
                  unsigned int pad_x = 0, unsigned int pad_y = 0,
                  DimensionRoundingType round = DimensionRoundingType::FLOOR)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_x),
          _pad_top(pad_y),
          _pad_right(pad_x),
          _pad_bottom(pad_y),
          _round_type(round)
    {
    }
    /** Constructor
     *
     * @param[in] stride_x   Stride, in elements, across x.
     * @param[in] stride_y   Stride, in elements, across y.
     * @param[in] pad_left   Padding across x on the left, in elements.
     * @param[in] pad_right  Padding across x on the right, in elements.
     * @param[in] pad_top    Padding across y on the top, in elements.
     * @param[in] pad_bottom Padding across y on the bottom, in elements.
     * @param[in] round      Dimensions rounding.
     */
    PadStrideInfo(unsigned int stride_x, unsigned int stride_y,
                  unsigned int pad_left, unsigned int pad_right,
                  unsigned int pad_top, unsigned int pad_bottom,
                  DimensionRoundingType round)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_left),
          _pad_top(pad_top),
          _pad_right(pad_right),
          _pad_bottom(pad_bottom),
          _round_type(round)
    {
    }
    /** Get the stride.
     *
     * @return a pair: stride x, stride y.
     */
    std::pair<unsigned int, unsigned int> stride() const
    {
        return _stride;
    }
    /** Check whether the padding is symmetric.
     *
     * @return True if the padding is symmetric.
     */
    bool padding_is_symmetric() const
    {
        return (_pad_left == _pad_right) && (_pad_top == _pad_bottom);
    }
    /** Get the padding.
     *
     * @note This should only be used when the padding is symmetric.
     *
     * @return a pair: padding left/right, padding top/bottom
     */
    std::pair<unsigned int, unsigned int> pad() const
    {
        // This accessor should be used only when padding is symmetric
        ARM_COMPUTE_ERROR_ON(!padding_is_symmetric());
        return std::make_pair(_pad_left, _pad_top);
    }

    /** Get the left padding */
    unsigned int pad_left() const
    {
        return _pad_left;
    }
    /** Get the right padding */
    unsigned int pad_right() const
    {
        return _pad_right;
    }
    /** Get the top padding */
    unsigned int pad_top() const
    {
        return _pad_top;
    }
    /** Get the bottom padding */
    unsigned int pad_bottom() const
    {
        return _pad_bottom;
    }

    /** Get the rounding type */
    DimensionRoundingType round() const
    {
        return _round_type;
    }

    /** Check whether this has any padding */
    bool has_padding() const
    {
        return (_pad_left != 0 || _pad_top != 0 || _pad_right != 0 || _pad_bottom != 0);
    }

private:
    std::pair<unsigned int, unsigned int> _stride;
    unsigned int _pad_left;
    unsigned int _pad_top;
    unsigned int _pad_right;
    unsigned int _pad_bottom;

    DimensionRoundingType _round_type;
};
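
/* Usage sketch (illustrative): stride 2 with one pixel of symmetric padding, using the
 * default floor rounding when the resulting dimensions are not exact.
 *
 *   PadStrideInfo conv_info(2, 2, 1, 1);
 *   auto stride = conv_info.stride();               // (2, 2)
 *   auto pad    = conv_info.pad();                  // (1, 1) - only valid because padding is symmetric
 *   bool sym    = conv_info.padding_is_symmetric(); // true
 */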

/** Fully connected layer info */
struct FullyConnectedLayerInfo
{
    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */

    /** Sets the weights trained data layout
     *
     * @param[in] layout Data layout that the weights were trained with
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_weights_trained_layout(DataLayout layout)
    {
        weights_trained_layout = layout;
        return *this;
    }
    /** Sets the transpose weights flag
     *
     * @param[in] should_transpose_weights Boolean flag indicating if weights should be transposed
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_transpose_weights(bool should_transpose_weights)
    {
        transpose_weights = should_transpose_weights;
        return *this;
    }
};
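
/* Usage sketch (illustrative): the setters return *this, so they can be chained.
 *
 *   FullyConnectedLayerInfo fc_info;
 *   fc_info.set_weights_trained_layout(DataLayout::NHWC).set_transpose_weights(false);
 */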

/** Pooling Layer Information class */
class PoolingLayerInfo
{
public:
    /** Default Constructor */
    PoolingLayerInfo()
        : _pool_type(PoolingType::MAX), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType pool_type,
                              unsigned int pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool exclude_padding = false)
        : _pool_type(pool_type), _pool_size(Size2D(pool_size, pool_size)), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType pool_type,
                              Size2D pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool exclude_padding = false)
        : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @note This constructor is used for global pooling
     *
     * @param[in] pool_type Pooling type @ref PoolingType.
     */
    explicit PoolingLayerInfo(PoolingType pool_type)
        : _pool_type(pool_type), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
    {
    }
    /** Get the pooling type */
    PoolingType pool_type() const
    {
        return _pool_type;
    }
    /** Get the pooling size */
    const Size2D &pool_size() const
    {
        return _pool_size;
    }
    /** Get the padding and stride */
    PadStrideInfo pad_stride_info() const
    {
        return _pad_stride_info;
    }
    /** Check if padding is excluded in calculations */
    bool exclude_padding() const
    {
        return _exclude_padding;
    }
    /** Check if is global pooling */
    bool is_global_pooling() const
    {
        return _is_global_pooling;
    }

private:
    PoolingType   _pool_type;
    Size2D        _pool_size;
    PadStrideInfo _pad_stride_info;
    bool          _exclude_padding;
    bool          _is_global_pooling;
};
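
/* Usage sketch (illustrative): 3x3 max pooling with stride 2, and a global average pooling
 * configuration built with the dedicated constructor.
 *
 *   PoolingLayerInfo max_pool(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0));
 *   PoolingLayerInfo global_avg_pool(PoolingType::AVG); // is_global_pooling() == true
 */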

/** ROI Pooling Layer Information class */
class ROIPoolingLayerInfo final
{
public:
    /** Constructor
     *
     * @param[in] pooled_width   Pooled width of the layer.
     * @param[in] pooled_height  Pooled height of the layer.
     * @param[in] spatial_scale  Spatial scale to be applied to the ROI coordinates and dimensions.
     * @param[in] sampling_ratio Number of samples to include in each pooling region (if set to zero, ceil(roi_dims/pooling_dims) is used).
     */
    ROIPoolingLayerInfo(unsigned int pooled_width, unsigned int pooled_height, float spatial_scale, unsigned int sampling_ratio = 0)
        : _pooled_width(pooled_width), _pooled_height(pooled_height), _spatial_scale(spatial_scale), _sampling_ratio(sampling_ratio)
    {
    }
    /** Get the pooled width of the layer */
    unsigned int pooled_width() const
    {
        return _pooled_width;
    }
    /** Get the pooled height of the layer */
    unsigned int pooled_height() const
    {
        return _pooled_height;
    }
    /** Get the spatial scale */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get sampling ratio */
    unsigned int sampling_ratio() const
    {
        return _sampling_ratio;
    }

private:
    unsigned int _pooled_width;
    unsigned int _pooled_height;
    float        _spatial_scale;
    unsigned int _sampling_ratio;
};

/** Bounding Box Transform information class */
class BoundingBoxTransformInfo
{
public:
    /** Constructor
     *
     * @param[in] img_width       Width of the original image
     * @param[in] img_height      Height of the original image
     * @param[in] scale           Scale of the original image
     * @param[in] apply_scale     (Optional) Re-apply scaling after transforming the boxes. Defaults to false
     * @param[in] weights         (Optional) Weights [wx, wy, ww, wh] for the deltas. Defaults to all ones
     * @param[in] bbox_xform_clip (Optional) Minimum bounding box width and height after bounding box transformation in log-space. Defaults to log(1000/16)
     */
    BoundingBoxTransformInfo(float img_width, float img_height, float scale, bool apply_scale = false, const std::array<float, 4> weights = { { 1.f, 1.f, 1.f, 1.f } }, float bbox_xform_clip = 4.135166556742356f)
        : _img_width(img_width), _img_height(img_height), _scale(scale), _apply_scale(apply_scale), _weights(weights), _bbox_xform_clip(bbox_xform_clip)
    {
    }

    std::array<float, 4> weights() const
    {
        return _weights;
    }

    float bbox_xform_clip() const
    {
        return _bbox_xform_clip;
    }

    float img_height() const
    {
        return _img_height;
    }

    float img_width() const
    {
        return _img_width;
    }

    float scale() const
    {
        return _scale;
    }

    bool apply_scale() const
    {
        return _apply_scale;
    }

private:
    float _img_width;
    float _img_height;
    float _scale;
    bool  _apply_scale;
    std::array<float, 4> _weights;
    float _bbox_xform_clip;
};

/** Activation Layer Information class */
class ActivationLayerInfo
{
public:
    /** Available activation functions */
    enum class ActivationFunction
    {
        LOGISTIC,        /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
        TANH,            /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
        RELU,            /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
        BOUNDED_RELU,    /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
        LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
        LEAKY_RELU,      /**< Leaky Rectifier ( \f$ f(x) = x \f$ if \f$ x > 0 \f$, \f$ f(x) = a \cdot x \f$ otherwise ) */
        SOFT_RELU,       /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
        ABS,             /**< Absolute ( \f$ f(x)= |x| \f$ ) */
        SQUARE,          /**< Square ( \f$ f(x)= x^2 \f$ ) */
        SQRT,            /**< Square root ( \f$ f(x) = \sqrt{x} \f$ ) */
        LINEAR           /**< Linear ( \f$ f(x)= ax + b \f$ ) */
    };

    ActivationLayerInfo() = default;
    /** Constructor
     *
     * @param[in] f The activation function to use.
     * @param[in] a (Optional) The alpha parameter used by some activation functions
     *              (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
     * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
     */
    ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
        : _act(f), _a(a), _b(b), _enabled(true)
    {
    }
    /** Get the type of activation function */
    ActivationFunction activation() const
    {
        return _act;
    }
    /** Get the alpha value */
    float a() const
    {
        return _a;
    }
    /** Get the beta value */
    float b() const
    {
        return _b;
    }
    /** Check if initialised */
    bool enabled() const
    {
        return _enabled;
    }

private:
    ActivationFunction _act = { ActivationLayerInfo::ActivationFunction::LOGISTIC };
    float              _a = {};
    float              _b = {};
    bool               _enabled = { false };
};
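
/* Usage sketch (illustrative): a plain ReLU and a ReLU bounded at 6 (for BOUNDED_RELU the
 * alpha parameter is the upper bound).
 *
 *   ActivationLayerInfo relu(ActivationLayerInfo::ActivationFunction::RELU);
 *   ActivationLayerInfo relu6(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);
 *   bool act_enabled = relu6.enabled(); // true
 */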

/** Normalization Layer Information class */
class NormalizationLayerInfo
{
public:
    /** Default Constructor
     *
     * @param[in] type      The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NormType::CROSS_MAP
     * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5.
     * @param[in] alpha     (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001.
     * @param[in] beta      (Optional) Beta parameter used by normalization equation. Defaults to 0.5.
     * @param[in] kappa     (Optional) Kappa parameter used by [Krizhevsky 2012] Across Channel Local Brightness Normalization equation.
     * @param[in] is_scaled (Optional) Boolean that specifies if alpha will be scaled by the normalization size or not.
     *                      Should be false to follow [Krizhevsky 2012].
     */
    NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true)
        : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled)
    {
    }
    /** Get the normalization type */
    NormType type() const
    {
        return _type;
    }
    /** Get the normalization size */
    uint32_t norm_size() const
    {
        return _norm_size;
    }
    /** Get the alpha value */
    float alpha() const
    {
        return _alpha;
    }
    /** Get the beta value */
    float beta() const
    {
        return _beta;
    }
    /** Get the kappa value */
    float kappa() const
    {
        return _kappa;
    }
    /** Check if normalization is cross map */
    bool is_cross_map() const
    {
        return _type == NormType::CROSS_MAP;
    }
    /** Check if normalization is not cross map */
    bool is_in_map() const
    {
        return !is_cross_map();
    }
    /** Return the scaling factor of the normalization function.
     *
     * If is_scaled is set to false, alpha is returned plainly, following [Krizhevsky 2012] normalization;
     * otherwise alpha is scaled by the total number of elements used for the normalization.
     *
     * @return The normalization scaling factor.
     */
    float scale_coeff() const
    {
        const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size;
        return (_is_scaled) ? (_alpha / size) : _alpha;
    }

private:
    NormType _type;
    uint32_t _norm_size;
    float    _alpha;
    float    _beta;
    float    _kappa;
    bool     _is_scaled;
};
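
/* Usage sketch (illustrative): cross-map normalization over 5 elements. With the default
 * is_scaled = true, scale_coeff() returns alpha / norm_size.
 *
 *   NormalizationLayerInfo norm_info(NormType::CROSS_MAP, 5, 0.0001f, 0.5f);
 *   float coeff = norm_info.scale_coeff(); // 0.0001 / 5 = 0.00002
 */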

/** Convolution Layer Weights Information class. This class stores the necessary information to compute convolution layer when the weights are already reshaped */
class WeightsInfo
{
public:
    /** Default constructor */
    WeightsInfo()
        : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
    {
    }
    /** Constructor
     *
     * @param[in] are_reshaped            True if the weights have been reshaped
     * @param[in] kernel_width            Kernel width.
     * @param[in] kernel_height           Kernel height.
     * @param[in] num_kernels             Number of convolution kernels.
     * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
     */
    WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
        : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
    {
    }
    /** Flag which specifies if the weights tensor has been reshaped.
     *
     * @return True if the weights tensor has been reshaped
     */
    bool are_reshaped() const
    {
        return _are_reshaped;
    };
    /** Return the number of convolution kernels
     *
     * @return The number of convolution kernels
     */
    unsigned int num_kernels() const
    {
        return _num_kernels;
    };
    /** Return the width and height of the kernel
     *
     * @return The width and height of the kernel
     */
    std::pair<unsigned int, unsigned int> kernel_size() const
    {
        return std::make_pair(_kernel_width, _kernel_height);
    }
    bool retain_internal_weights() const
    {
        return _retain_internal_weights;
    }

private:
    const bool         _are_reshaped;
    const unsigned int _kernel_width;
    const unsigned int _kernel_height;
    const unsigned int _num_kernels;
    const bool         _retain_internal_weights;
};

/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
 *
 * The matrix A can only be reshaped through @ref CLGEMMInterleave4x4Kernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
 * Note: Optionally, just for @ref CLGEMMInterleave4x4Kernel, it is possible to set mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block
 *
 * The matrix B can only be reshaped through @ref CLGEMMTranspose1xWKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
 * Note: Optionally, just for @ref CLGEMMTranspose1xWKernel, it is possible to set mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block
 *
 */
class GEMMReshapeInfo final
{
public:
    /** Default constructor */
    GEMMReshapeInfo()
        : _m(1), _n(1), _k(1), _mult_transpose1xW_width(1), _mult_interleave4x4_height(1), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false)
    {
    }
    /** Constructor
     *
     * @param[in] m                         Number of matrix A rows
     * @param[in] n                         Number of matrix B columns
     * @param[in] k                         Number of matrix A columns or matrix B rows
     * @param[in] mult_transpose1xW_width   (Optional) Multiplication factor for the width of the 1xW transposed block
     * @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleaved block
     * @param[in] depth_output_gemm3d       (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel.
     *                                      If 0 the output will not be reinterpreted as 3D. Default 0
     * @param[in] reinterpret_input_as_3d   (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
     *                                      to perform 1x1 convolutions with the NHWC data layout)
     */
    GEMMReshapeInfo(int m, int n, int k, int mult_transpose1xW_width = 1, int mult_interleave4x4_height = 1, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false)
        : _m(m), _n(n), _k(k), _mult_transpose1xW_width(mult_transpose1xW_width), _mult_interleave4x4_height(mult_interleave4x4_height), _depth_output_gemm3d(depth_output_gemm3d),
          _reinterpret_input_as_3d(reinterpret_input_as_3d)
    {
    }
    /** Number of matrix A rows
     *
     * @return the number of matrix A rows
     */
    int m() const
    {
        return _m;
    }
    /** Number of matrix B columns
     *
     * @return the number of matrix B columns
     */
    int n() const
    {
        return _n;
    }
    /** Number of matrix A columns or matrix B rows
     *
     * @return the number of matrix A columns or matrix B rows
     */
    int k() const
    {
        return _k;
    }
    /** Multiplication factor for the width of the 1xW transposed block
     *
     * @return the multiplication factor for the width of the 1xW transposed block
     */
    int mult_transpose1xW_width() const
    {
        return _mult_transpose1xW_width;
    }
    /** Multiplication factor for the height of the 4x4 interleaved block
     *
     * @return the multiplication factor for the height of the 4x4 interleaved block
     */
    int mult_interleave4x4_height() const
    {
        return _mult_interleave4x4_height;
    }
    /** Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
     *
     * @note The GEMM3D kernel is used when the output has to be reinterpreted as a 3D tensor. In that case:
     *       m = depth_output_gemm3d * output_height
     *
     * @return the depth of the output tensor to be used with the GEMM3D kernel
     */
    int depth_output_gemm3d() const
    {
        return _depth_output_gemm3d;
    }
    /** Flag which specifies if the input tensor has to be reinterpreted as 3D
     *
     * @return True if the input tensor has to be reinterpreted as 3D tensor
     */
    bool reinterpret_input_as_3d() const
    {
        return _reinterpret_input_as_3d;
    };

private:
    const int  _m;
    const int  _n;
    const int  _k;
    const int  _mult_transpose1xW_width;
    const int  _mult_interleave4x4_height;
    const int  _depth_output_gemm3d;
    const bool _reinterpret_input_as_3d;
};

/** GEMMLowp output stage type */
enum class GEMMLowpOutputStageType
{
    NONE,                     /**< No quantization to uint8 */
    QUANTIZE_DOWN,            /**< Quantize to uint8 using an integer multiplication */
    QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
    QUANTIZE_DOWN_FLOAT       /**< Quantize to uint8 using a floating point multiplication */
};

/** GEMMLowp output stage info */
struct GEMMLowpOutputStageInfo
{
    GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
    int                     gemmlowp_offset{ 0 };                  /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
    int                     gemmlowp_multiplier{ 0 };              /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
    int                     gemmlowp_shift{ 0 };                   /**< GEMMLowp output stage shift used for quantizing to uint8 */
    int                     gemmlowp_min_bound{ 0 };               /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
    int                     gemmlowp_max_bound{ 0 };               /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
};
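
/* Usage sketch (illustrative): a fixed-point requantization stage. The multiplier and shift
 * below are hypothetical values; in practice they are derived from the input/output scales.
 *
 *   GEMMLowpOutputStageInfo output_stage;
 *   output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
 *   output_stage.gemmlowp_multiplier = 1073741824; // example fixed-point multiplier
 *   output_stage.gemmlowp_shift      = 5;
 *   output_stage.gemmlowp_min_bound  = 0;
 *   output_stage.gemmlowp_max_bound  = 255;
 */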
1304
Gian Marco36a0a462018-01-12 10:21:40 +00001305/** GEMM information class. This class stores the necessary information to compute GEMM functions
1306 *
1307 * This object also contains the information about how matrix A and matrix B have been reshaped
1308 *
1309 */
Chunosov5124be52017-11-22 20:42:13 +07001310class GEMMInfo
1311{
1312public:
1313 /** Default constructor */
1314 GEMMInfo()
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001315 : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(false), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false), _retain_internal_weights(false), _gemmlowp_output_stage()
Chunosov5124be52017-11-22 20:42:13 +07001316 {
1317 }
1318 /** Constructor
1319 *
1320 * @param[in] is_a_reshaped True if the matrix A has been reshaped
1321 * @param[in] is_b_reshaped True if the matrix B has been reshaped
1322 * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001323 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001324 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001325 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as a 3D tensor (i.e. this flag should be set to true when GEMM is used
1326 * to perform 1x1 convolutions with the NHWC data layout)
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001327 * @param[in] retain_internal_weights (Optional) Retain the weights tensor from the previous run
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001328 * @param[in] gemmlowp_output_stage (Optional) GEMMLowp Output stage info
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001329 *
Chunosov5124be52017-11-22 20:42:13 +07001330 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001331 GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001332 GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo())
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001333 : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run), _depth_output_gemm3d(depth_output_gemm3d),
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001334 _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights), _gemmlowp_output_stage(gemmlowp_output_stage)
Chunosov5124be52017-11-22 20:42:13 +07001335 {
1336 }
1337 /** Flag which specifies if the matrix A has been reshaped
1338 *
1339 * @return True if the matrix A has been reshaped
1340 */
1341 bool is_a_reshaped() const
1342 {
1343 return _is_a_reshaped;
1344 };
1345 /** Flag which specifies if the matrix B has been reshaped
1346 *
1347 * @return True if the matrix B has been reshaped
1348 */
1349 bool is_b_reshaped() const
1350 {
1351 return _is_b_reshaped;
1352 };
1353 /** Flag which specifies if the reshape of matrix B should be executed only for the first run
1354 *
1355 * @note This flag could be set to TRUE when GEMM is used to accelerate a convolution layer
1356 *
1357 * @return True if the reshape of matrix B happens only for the first run
1358 */
1359 bool reshape_b_only_on_first_run() const
1360 {
1361 return _reshape_b_only_on_first_run;
1362 };
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001363 /** Depth of the output when GEMM output is reinterpreted as a 3D tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001364 *
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001365 * @return the depth of the output tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001366 */
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001367 int depth_output_gemm3d() const
Gian Marco36a0a462018-01-12 10:21:40 +00001368 {
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001369 return _depth_output_gemm3d;
1370 };
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001371 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1372 *
1373 * @return True if the input tensor has to be reinterpreted as a 3D tensor
1374 */
1375 bool reinterpret_input_as_3d() const
1376 {
1377 return _reinterpret_input_as_3d;
1378 };
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001379 /** Flag which specifies if the weights tensor has to be retained from the previous run
1380 *
1381 * @return True if the weights tensor has to be retained
1382 */
1383 bool retain_internal_weights() const
1384 {
1385 return _retain_internal_weights;
1386 };
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001387 /** GEMMLowp output stage
1388 *
1389 * @return the GEMMLowp output stage info
1390 */
1391 GEMMLowpOutputStageInfo gemmlowp_output_stage() const
1392 {
1393 return _gemmlowp_output_stage;
1394 };
Chunosov5124be52017-11-22 20:42:13 +07001395
1396private:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001397 const bool _is_a_reshaped;
1398 const bool _is_b_reshaped;
1399 const bool _reshape_b_only_on_first_run;
1400 const int _depth_output_gemm3d;
1401 const bool _reinterpret_input_as_3d;
1402 const bool _retain_internal_weights;
1403 const GEMMLowpOutputStageInfo _gemmlowp_output_stage;
Chunosov5124be52017-11-22 20:42:13 +07001404};
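// --- Illustrative sketch, not part of the original header ---
// Constructing a GEMMInfo for the convolution-style use case mentioned above:
// neither input is pre-reshaped, matrix B is reshaped only on the first run, and
// no 3D reinterpretation is requested (the remaining parameters keep their
// defaults). The function name is a placeholder added for illustration.
inline GEMMInfo example_convolution_gemm_info()
{
    const bool is_a_reshaped               = false; // matrix A is passed in its original layout
    const bool is_b_reshaped               = false; // matrix B is passed in its original layout
    const bool reshape_b_only_on_first_run = true;  // cache the reshaped B across runs
    return GEMMInfo(is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run);
}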
1405
Gian Marco Iodice247f52c2018-03-22 11:24:56 +00001406/** Winograd information */
1407struct WinogradInfo
1408{
1409 /** Default constructor
1410 *
1411 * @param[in] output_tile_sz Width and height of the output tile
1412 * @param[in] kernel_sz Width and height of the kernel
1413 * @param[in] input_dims Width and height of the input tensor before the convolution is applied
1414 * @param[in] conv_info Convolution info (Pads, strides)
1415 * @param[in] data_layout Data layout to use for the output tensor once the convolution has been applied
1416 */
1417 WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
1418 : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
1419 {
1420 }
1421
1422 Size2D output_tile_size{}; /**< Width and height of the output tile */
1423 Size2D kernel_size{}; /**< Width and height of the kernel */
1424 Size2D input_dimensions{}; /**< Width and height of the input tensor before the convolution is applied */
1425 PadStrideInfo convolution_info{}; /**< Convolution info (Pads, strides,...) */
1426 DataLayout output_data_layout{ DataLayout::NCHW }; /**< Data layout to use for the output tensor once the convolution has been applied (NCHW or NHWC) */
1427};
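// --- Illustrative sketch, not part of the original header ---
// A WinogradInfo for a 3x3 kernel producing 2x2 output tiles on a 224x224 input.
// The function name and the sizes are placeholders added for illustration, and
// the PadStrideInfo arguments are assumed to follow the usual
// (stride_x, stride_y, pad_x, pad_y) order.
inline WinogradInfo example_winograd_info()
{
    return WinogradInfo(Size2D(2U, 2U),            // output tile width and height
                        Size2D(3U, 3U),            // kernel width and height
                        Size2D(224U, 224U),        // input width and height before the convolution
                        PadStrideInfo(1, 1, 1, 1), // unit strides, one pixel of padding (assumed argument order)
                        DataLayout::NCHW);         // layout of the output once the convolution has been applied
}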
1428
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001429/** IO formatting information class*/
1430struct IOFormatInfo
1431{
1432 /** Precision type used when printing floating point numbers */
1433 enum class PrecisionType
1434 {
1435 Default, /**< Default the precision to the one that the current stream has */
1436 Custom, /**< Custom precision specified by the user using the precision parameter */
1437 Full /**< The maximum precision of the floating point representation */
1438 };
1439
1440 /** Specifies the area to be printed, used by Tensor objects */
1441 enum class PrintRegion
1442 {
1443 ValidRegion, /**< Prints the valid region of the Tensor object */
1444 NoPadding, /**< Prints the Tensor object without the padding */
1445 Full /**< Prints the Tensor object including the padding */
1446 };
1447
Alex Gildayc357c472018-03-21 13:54:09 +00001448 /** Construct a set of IO formatting information.
1449 *
1450 * @param[in] print_region Area to be printed. Used by Tensor objects. Default: ValidRegion.
1451 * @param[in] precision_type Precision type for floating point numbers. Default: stream default.
1452 * @param[in] precision Precision value for floating point numbers. Default: 10.
1453 * @param[in] align_columns Whether to align columns when printed. Default: true.
1454 * @param[in] element_delim Delimiter between elements. Default: " ".
1455 * @param[in] row_delim Delimiter between rows. Default: "\n".
1456 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001457 IOFormatInfo(PrintRegion print_region = PrintRegion::ValidRegion,
1458 PrecisionType precision_type = PrecisionType::Default,
1459 unsigned int precision = 10,
1460 bool align_columns = true,
1461 std::string element_delim = " ",
1462 std::string row_delim = "\n")
1463 : print_region(print_region),
1464 precision_type(precision_type),
1465 precision(precision),
1466 element_delim(element_delim),
1467 row_delim(row_delim),
1468 align_columns(align_columns)
1469 {
1470 }
1471
Alex Gildayc357c472018-03-21 13:54:09 +00001472 /** Area to be printed by Tensor objects */
1473 PrintRegion print_region;
1474 /** Floating point precision type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001475 PrecisionType precision_type;
Alex Gildayc357c472018-03-21 13:54:09 +00001476 /** Floating point precision */
1477 unsigned int precision;
1478 /** Element delimiter */
1479 std::string element_delim;
1480 /** Row delimiter */
1481 std::string row_delim;
1482 /** Align columns */
1483 bool align_columns;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001484};
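// --- Illustrative sketch, not part of the original header ---
// An IOFormatInfo that prints the whole tensor, padding included, with a custom
// 4-digit precision and comma-separated elements. The function name is a
// placeholder added for illustration.
inline IOFormatInfo example_io_format_info()
{
    return IOFormatInfo(IOFormatInfo::PrintRegion::Full,     // print the padding as well
                        IOFormatInfo::PrecisionType::Custom, // use the precision given below
                        4,                                   // custom precision
                        true,                                // align columns
                        ", ",                                // element delimiter
                        "\n");                               // row delimiter
}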
Isabella Gottardif07d28d2018-02-06 14:52:43 +00001485
1486/** Available ConvolutionMethod */
1487enum class ConvolutionMethod
1488{
1489 GEMM, /**< Convolution using GEMM */
1490 DIRECT, /**< Direct convolution */
1491 WINOGRAD /**< Convolution using Winograd */
1492};
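// --- Illustrative sketch, not part of the original header ---
// A toy heuristic mapping a kernel size to one of the ConvolutionMethod values
// above. The function name and the rules are placeholders added for illustration;
// the real selection logic in the library depends on many more factors
// (data type, layout, tensor shapes, target, ...).
inline ConvolutionMethod example_pick_convolution_method(const Size2D &kernel_size)
{
    if(kernel_size.width == 3 && kernel_size.height == 3)
    {
        return ConvolutionMethod::WINOGRAD; // 3x3 kernels are the typical Winograd case
    }
    if(kernel_size.width == 1 && kernel_size.height == 1)
    {
        return ConvolutionMethod::GEMM; // 1x1 convolutions map naturally onto GEMM
    }
    return ConvolutionMethod::DIRECT;
}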
Georgios Pinitasd8734b52017-12-22 15:27:52 +00001493} // namespace arm_compute
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001494#endif /* __ARM_COMPUTE_TYPES_H__ */