/*
 * Copyright (c) 2016-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TYPES_H__
#define __ARM_COMPUTE_TYPES_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/QAsymm8.h"
#include "arm_compute/core/Rounding.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "support/Half.h"

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>

namespace arm_compute
{
/** 16-bit floating point type */
using half = half_float::half;

/** Permutation vector */
using PermutationVector = Strides;
/** Bidirectional strides */
using BiStrides = Coordinates;

/** Image colour formats */
enum class Format
{
    UNKNOWN,  /**< Unknown image format */
    U8,       /**< 1 channel, 1 U8 per channel */
    S16,      /**< 1 channel, 1 S16 per channel */
    U16,      /**< 1 channel, 1 U16 per channel */
    S32,      /**< 1 channel, 1 S32 per channel */
    U32,      /**< 1 channel, 1 U32 per channel */
    F16,      /**< 1 channel, 1 F16 per channel */
    F32,      /**< 1 channel, 1 F32 per channel */
    UV88,     /**< 2 channels, 1 U8 per channel */
    RGB888,   /**< 3 channels, 1 U8 per channel */
    RGBA8888, /**< 4 channels, 1 U8 per channel */
    YUV444,   /**< 3 planes of 8-bit 4:4:4 sampled Y, U, V */
    YUYV422,  /**< A single plane of 32-bit macro pixels of Y0, U0, Y1, V0 bytes */
    NV12,     /**< A 2-plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling */
    NV21,     /**< A 2-plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling */
    IYUV,     /**< 3 planes of 8-bit 4:2:0 sampled Y, U, V */
    UYVY422   /**< A single plane of 32-bit macro pixels of U0, Y0, V0, Y1 bytes */
};

/** Available data types */
enum class DataType
{
    UNKNOWN, /**< Unknown data type */
    U8,      /**< unsigned 8-bit number */
    S8,      /**< signed 8-bit number */
    QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number */
    U16,     /**< unsigned 16-bit number */
    S16,     /**< signed 16-bit number */
    U32,     /**< unsigned 32-bit number */
    S32,     /**< signed 32-bit number */
    U64,     /**< unsigned 64-bit number */
    S64,     /**< signed 64-bit number */
    F16,     /**< 16-bit floating-point number */
    F32,     /**< 32-bit floating-point number */
    F64,     /**< 64-bit floating-point number */
    SIZET    /**< size_t */
};

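/* Illustrative sketch (not part of this header): mapping a DataType to its element size in bytes.
 * The library already ships a helper for this (data_size_from_type() in Utils.h), so the
 * stand-alone switch below is only an assumed equivalent for the fixed-width types listed above.
 *
 *   inline size_t example_element_size(DataType dt)
 *   {
 *       switch(dt)
 *       {
 *           case DataType::U8:
 *           case DataType::S8:
 *           case DataType::QASYMM8:
 *               return 1;
 *           case DataType::U16:
 *           case DataType::S16:
 *           case DataType::F16:
 *               return 2;
 *           case DataType::U32:
 *           case DataType::S32:
 *           case DataType::F32:
 *               return 4;
 *           case DataType::U64:
 *           case DataType::S64:
 *           case DataType::F64:
 *               return 8;
 *           case DataType::SIZET:
 *               return sizeof(size_t);
 *           default: // DataType::UNKNOWN
 *               return 0;
 *       }
 *   }
 */
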
/** Available Sampling Policies */
enum class SamplingPolicy
{
    CENTER,  /**< Samples are taken at pixel center */
    TOP_LEFT /**< Samples are taken at pixel top left corner */
};

/** Constant value of the border pixels when using BorderMode::CONSTANT */
constexpr uint8_t CONSTANT_BORDER_VALUE = 199;

/** Constant value used to indicate a half-scale pyramid */
constexpr float SCALE_PYRAMID_HALF = 0.5f;

/** Constant value used to indicate an ORB scaled pyramid */
constexpr float SCALE_PYRAMID_ORB = 8.408964152537146130583778358414e-01;

/** [DataLayout enum definition] **/

/** Supported tensor data layouts */
enum class DataLayout
{
    UNKNOWN, /**< Unknown data layout */
    NCHW,    /**< Num samples, channels, height, width */
    NHWC     /**< Num samples, height, width, channels */
};
/** [DataLayout enum definition] **/

/** Supported tensor data layout dimensions */
enum class DataLayoutDimension
{
    CHANNEL, /**< channel */
    HEIGHT,  /**< height */
    WIDTH,   /**< width */
    BATCHES  /**< batches */
};

/** Available ConvolutionMethod */
enum class ConvolutionMethod
{
    GEMM,     /**< Convolution using GEMM */
    DIRECT,   /**< Direct convolution */
    WINOGRAD, /**< Convolution using Winograd */
    FFT       /**< Convolution using FFT */
};

/** Available DeconvolutionMethod */
enum class DeconvolutionMethod
{
    GEMM,   /**< Deconvolution using GEMM */
    DIRECT, /**< Direct deconvolution */
};

/** Padding mode to use for PadLayer */
enum class PaddingMode
{
    CONSTANT,
    REFLECT,
    SYMMETRIC
};

/** Supported comparison operations */
enum class ComparisonOperation
{
    Equal,        /**< Equal comparison ( \f$ x == y \f$ ) */
    NotEqual,     /**< NotEqual comparison ( \f$ x != y \f$ ) */
    Greater,      /**< Greater comparison ( \f$ x > y \f$ ) */
    GreaterEqual, /**< Greater equal comparison ( \f$ x >= y \f$ ) */
    Less,         /**< Less comparison ( \f$ x < y \f$ ) */
    LessEqual     /**< Less equal comparison ( \f$ x <= y \f$ ) */
};

/** Quantization settings (used for QASYMM8 data type) */
struct QuantizationInfo
{
    /** Default constructor */
    QuantizationInfo() noexcept
        : scale(0.0f),
          offset(0)
    {
    }

    /** Construct quantization info.
     *
     * @param[in] scale  Scale.
     * @param[in] offset Offset.
     */
    QuantizationInfo(float scale, int offset)
        : scale(scale), offset(offset)
    {
    }

    /** Check whether equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is the same.
     */
    bool operator==(const QuantizationInfo &other) const
    {
        return scale == other.scale && offset == other.offset;
    }

    /** Check whether not equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is not the same.
     */
    bool operator!=(const QuantizationInfo &other) const
    {
        return !(*this == other);
    }

    float scale;  /**< scale */
    int   offset; /**< offset */

    /** Quantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value           Value to quantize.
     * @param[in] rounding_policy Policy to use when rounding.
     *
     * @return the quantized value.
     */
    qasymm8_t quantize(float value, RoundingPolicy rounding_policy) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::quantize: scale == 0");
        return sqcvt_qasymm8_f32(value, scale, offset, rounding_policy);
    }

    /** Dequantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value Value to dequantize.
     *
     * @return the original value before quantization.
     */
    float dequantize(qasymm8_t value) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::dequantize: scale == 0");
        return scvt_f32_qasymm8(value, scale, offset);
    }

    /** Indicates whether this QuantizationInfo has valid settings or not
     *
     * @return True if this has invalid settings.
     */
    bool empty() const
    {
        return scale == 0;
    }
};

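/* Usage sketch (illustrative, not part of the API): round-tripping a float through the QASYMM8
 * affine mapping q = round(value / scale) + offset, clamped to [0, 255]. The RoundingPolicy
 * enumerator used below is assumed to come from Rounding.h.
 *
 *   QuantizationInfo qinfo(0.1f, 128);                                  // scale 0.1, zero-point 128
 *   qasymm8_t q = qinfo.quantize(0.5f, RoundingPolicy::TO_NEAREST_UP);  // 0.5 / 0.1 + 128 = 133
 *   float     x = qinfo.dequantize(q);                                  // (133 - 128) * 0.1 = 0.5f
 */
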
/** Container for valid region of a window */
struct ValidRegion
{
    /** Default constructor */
    ValidRegion()
        : anchor{}, shape{}
    {
    }

    /** Allow instances of this class to be copy constructed */
    ValidRegion(const ValidRegion &) = default;
    /** Allow instances of this class to be move constructed */
    ValidRegion(ValidRegion &&) = default;
    /** Allow instances of this class to be copied */
    ValidRegion &operator=(const ValidRegion &) = default;
    /** Allow instances of this class to be moved */
    ValidRegion &operator=(ValidRegion &&) = default;
    /** Default destructor */
    ~ValidRegion() = default;

    /** Constructor for a valid region with default number of dimensions
     *
     * @param[in] an_anchor Anchor for the start of the valid region.
     * @param[in] a_shape   Shape of the valid region.
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
    }

    /** Constructor for a valid region with specified number of dimensions
     *
     * @param[in] an_anchor      Anchor for the start of the valid region.
     * @param[in] a_shape        Shape of the valid region.
     * @param[in] num_dimensions Number of dimensions (must be >= number of dimensions of anchor and shape).
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
        anchor.set_num_dimensions(num_dimensions);
    }

    /** Return the start of the valid region for the given dimension @p d */
    int start(unsigned int d) const
    {
        return anchor[d];
    }

    /** Return the end of the valid region for the given dimension @p d */
    int end(unsigned int d) const
    {
        return anchor[d] + shape[d];
    }

    /** Accessor to set the value of anchor and shape for one of the dimensions.
     *
     * @param[in] dimension Dimension for which the value is set.
     * @param[in] start     Value to be set in anchor for the dimension.
     * @param[in] size      Value to be set in shape for the dimension.
     *
     * @return *this.
     */
    ValidRegion &set(size_t dimension, int start, size_t size)
    {
        anchor.set(dimension, start);
        shape.set(dimension, size);
        return *this;
    }

    Coordinates anchor; /**< Anchor for the start of the valid region. */
    TensorShape shape;  /**< Shape of the valid region. */
};

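/* Usage sketch (illustrative, not part of the API): describing the valid 254x254 interior of a
 * 256x256 image whose 1-pixel border is undefined, then shrinking dimension 0 afterwards.
 *
 *   ValidRegion region(Coordinates(1, 1), TensorShape(254U, 254U));
 *   int x0 = region.start(0); // 1
 *   int x1 = region.end(0);   // 255
 *   region.set(0, 2, 252);    // anchor.x = 2, shape.x = 252
 */
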
/** Methods available to handle borders */
enum class BorderMode
{
    UNDEFINED, /**< Borders are left undefined */
    CONSTANT,  /**< Pixels outside the image are assumed to have a constant value */
    REPLICATE  /**< Pixels outside the image are assumed to have the same value as the closest image pixel */
};

/** Container for 2D border size */
struct BorderSize
{
    /** Empty border, i.e. no border */
    constexpr BorderSize()
        : top{ 0 }, right{ 0 }, bottom{ 0 }, left{ 0 }
    {
    }

    /** Border with equal size around the 2D plane */
    explicit constexpr BorderSize(unsigned int size)
        : top{ size }, right{ size }, bottom{ size }, left{ size }
    {
    }

    /** Border with same size for top/bottom and left/right */
    constexpr BorderSize(unsigned int top_bottom, unsigned int left_right)
        : top{ top_bottom }, right{ left_right }, bottom{ top_bottom }, left{ left_right }
    {
    }

    /** Border with different sizes */
    constexpr BorderSize(unsigned int top, unsigned int right, unsigned int bottom, unsigned int left)
        : top{ top }, right{ right }, bottom{ bottom }, left{ left }
    {
    }

    /** Check if the entire border is zero */
    constexpr bool empty() const
    {
        return top == 0 && right == 0 && bottom == 0 && left == 0;
    }

    /** Check if the border is the same size on all sides */
    constexpr bool uniform() const
    {
        return top == right && top == bottom && top == left;
    }

    /** Scale this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return *this.
     */
    BorderSize &operator*=(float scale)
    {
        top *= scale;
        right *= scale;
        bottom *= scale;
        left *= scale;

        return *this;
    }

    /** Scale a copy of this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return a scaled copy of this.
     */
    BorderSize operator*(float scale)
    {
        BorderSize size = *this;
        size *= scale;

        return size;
    }

    /** Limit this border size.
     *
     * @param[in] limit Border size to limit this border size to.
     */
    void limit(const BorderSize &limit)
    {
        top    = std::min(top, limit.top);
        right  = std::min(right, limit.right);
        bottom = std::min(bottom, limit.bottom);
        left   = std::min(left, limit.left);
    }

    unsigned int top;    /**< top of the border */
    unsigned int right;  /**< right of the border */
    unsigned int bottom; /**< bottom of the border */
    unsigned int left;   /**< left of the border */
};

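/* Usage sketch (illustrative, not part of the API): building and constraining a border.
 *
 *   BorderSize conv_border(2, 1, 2, 1);      // top, right, bottom, left
 *   bool same = conv_border.uniform();       // false
 *   conv_border.limit(BorderSize(1));        // clamp every side to at most 1
 *   BorderSize doubled = conv_border * 2.0f; // scaled copy; conv_border itself is unchanged
 */
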
/** Container for 2D padding size */
using PaddingSize = BorderSize;

/** Policy to handle overflow */
enum class ConvertPolicy
{
    WRAP,    /**< Wrap around */
    SATURATE /**< Saturate */
};

/** Interpolation method */
enum class InterpolationPolicy
{
    NEAREST_NEIGHBOR, /**< Output values are defined to match the source pixel whose center is nearest to the sample position */
    BILINEAR,         /**< Output values are defined by bilinear interpolation between the pixels */
    AREA,             /**< Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image */
};

/** Bilinear Interpolation method used by LKTracker */
enum class BilinearInterpolation
{
    BILINEAR_OLD_NEW, /**< Old-new method */
    BILINEAR_SCHARR   /**< Scharr method */
};

/** Threshold mode */
enum class ThresholdType
{
    BINARY, /**< Threshold with one value */
    RANGE   /**< Threshold with two values */
};

/** Termination criteria */
enum class Termination
{
    TERM_CRITERIA_EPSILON,    /**< Terminate when within epsilon of a threshold */
    TERM_CRITERIA_ITERATIONS, /**< Terminate after a maximum number of iterations */
    TERM_CRITERIA_BOTH        /**< Terminate on whichever of the other conditions occurs first */
};

/** Magnitude calculation type. */
enum class MagnitudeType
{
    L1NORM, /**< L1 normalization type */
    L2NORM  /**< L2 normalization type */
};

/** Phase calculation type.
 *
 * @note When PhaseType == SIGNED, each angle is mapped to the range 0 to 255 inclusive, otherwise only angles between 0 and 180 are used.
 */
enum class PhaseType
{
    SIGNED,  /**< Angle range: [0, 360] */
    UNSIGNED /**< Angle range: [0, 180] */
};

/** Keypoint type */
struct KeyPoint
{
    int32_t x{ 0 };               /**< X coordinates */
    int32_t y{ 0 };               /**< Y coordinates */
    float   strength{ 0.f };      /**< Strength of the point */
    float   scale{ 0.f };         /**< Scale initialized to 0 by the corner detector */
    float   orientation{ 0.f };   /**< Orientation initialized to 0 by the corner detector */
    int32_t tracking_status{ 0 }; /**< Status initialized to 1 by the corner detector, set to 0 when the point is lost */
    float   error{ 0.f };         /**< Tracking error initialized to 0 by the corner detector */
};

/** Internal key point */
using InternalKeypoint = std::tuple<float, float, float>; /* x,y,strength */

/** Rectangle type */
struct Rectangle
{
    uint16_t x;      /**< Top-left x coordinate */
    uint16_t y;      /**< Top-left y coordinate */
    uint16_t width;  /**< Width of the rectangle */
    uint16_t height; /**< Height of the rectangle */
};

/** Coordinate type */
struct Coordinates2D
{
    int32_t x; /**< X coordinates */
    int32_t y; /**< Y coordinates */
};

/** Coordinate type */
struct Coordinates3D
{
    uint32_t x; /**< X coordinates */
    uint32_t y; /**< Y coordinates */
    uint32_t z; /**< Z coordinates */
};

/** Padding information as a pair of unsigned int start/end */
using PaddingInfo = std::pair<uint32_t, uint32_t>;

/** List of padding information */
using PaddingList = std::vector<PaddingInfo>;

/** Information to produce a tiled version of a Tensor */
using Multiples = std::vector<uint32_t>;

/** Available channels */
enum class Channel
{
    UNKNOWN, /**< Unknown channel format */
    C0,      /**< First channel (used by formats with unknown channel types). */
    C1,      /**< Second channel (used by formats with unknown channel types). */
    C2,      /**< Third channel (used by formats with unknown channel types). */
    C3,      /**< Fourth channel (used by formats with unknown channel types). */
    R,       /**< Red channel. */
    G,       /**< Green channel. */
    B,       /**< Blue channel. */
    A,       /**< Alpha channel. */
    Y,       /**< Luma channel. */
    U,       /**< Cb/U channel. */
    V        /**< Cr/V/Value channel. */
};

/** Available matrix patterns */
enum class MatrixPattern
{
    BOX,   /**< Box pattern matrix. */
    CROSS, /**< Cross pattern matrix. */
    DISK,  /**< Disk pattern matrix. */
    OTHER  /**< Any other matrix pattern. */
};

/** Available non linear functions. */
enum class NonLinearFilterFunction : unsigned
{
    MEDIAN = 0, /**< Non linear median filter. */
    MIN    = 1, /**< Non linear erode. */
    MAX    = 2, /**< Non linear dilate. */
};

/** Available reduction operations */
enum class ReductionOperation
{
    ARG_IDX_MAX, /**< Index of the max value */
    ARG_IDX_MIN, /**< Index of the min value */
    MEAN_SUM,    /**< Mean of sum */
    PROD,        /**< Product */
    SUM_SQUARE,  /**< Sum of squares */
    SUM,         /**< Sum */
    MIN,         /**< Min */
};

/** Available element-wise operations */
enum class ArithmeticOperation
{
    ADD,          /**< (x + y) */
    SUB,          /**< (x - y) */
    DIV,          /**< (x / y) */
    MIN,          /**< Min(x, y) */
    MAX,          /**< Max(x, y) */
    SQUARED_DIFF, /**< (x - y)^2 */
    POWER,        /**< x ^ y */
};

/** Available element wise unary operations */
enum class ElementWiseUnary
{
    RSQRT, /**< Reciprocal square root */
    EXP,   /**< Exponential */
    NEG,   /**< Negate */
    LOG,   /**< Natural Logarithm */
    ABS,   /**< Absolute value */
    SIN,   /**< Sine */
};

/** The normalization type used for the normalization layer */
enum class NormType
{
    IN_MAP_1D, /**< Normalization applied within the same map in 1D region */
    IN_MAP_2D, /**< Normalization applied within the same map in 2D region */
    CROSS_MAP  /**< Normalization applied across maps */
};

/** Normalization type for Histogram of Oriented Gradients (HOG) */
enum class HOGNormType
{
    L2_NORM    = 1, /**< L2-norm */
    L2HYS_NORM = 2, /**< L2-norm followed by clipping */
    L1_NORM    = 3  /**< L1 norm */
};

/** Detection window used for the object detection. The detection window keeps the following information:
 *
 * -# Geometry of the rectangular window (x/y of top-left corner and width/height)
 * -# Index of the class used for evaluating which class the detection window belongs to
 * -# Confidence value (score) obtained with the classifier
 */
struct DetectionWindow
{
    uint16_t x{ 0 };         /**< Top-left x coordinate */
    uint16_t y{ 0 };         /**< Top-left y coordinate */
    uint16_t width{ 0 };     /**< Width of the detection window */
    uint16_t height{ 0 };    /**< Height of the detection window */
    uint16_t idx_class{ 0 }; /**< Index of the class */
    float    score{ 0.f };   /**< Confidence value for the detection window */
};

/** Dimension rounding type when down-scaling on CNNs
 * @note Used in pooling and convolution layer
 */
enum class DimensionRoundingType
{
    FLOOR, /**< Floor rounding */
    CEIL   /**< Ceil rounding */
};

/** Available pooling types */
enum class PoolingType
{
    MAX, /**< Max Pooling */
    AVG, /**< Average Pooling */
    L2   /**< L2 Pooling */
};

/** Available non maxima suppression types */
enum class NMSType
{
    LINEAR,   /**< Linear NMS */
    GAUSSIAN, /**< Gaussian NMS */
    ORIGINAL  /**< Original NMS */
};

/** BoxWithNonMaximaSuppressionLimit Information class */
class BoxNMSLimitInfo final
{
public:
    /** Constructor
     *
     * @param[in] score_thresh             (Optional) Score threshold.
     * @param[in] nms                      (Optional) NMS value
     * @param[in] detections               (Optional) Number of detections
     * @param[in] soft_nms_enabled         (Optional) Enable SoftNMS
     * @param[in] soft_nms_method          (Optional) Soft NMS method
     * @param[in] soft_nms_sigma           (Optional) Soft NMS sigma value
     * @param[in] soft_nms_min_score_thres (Optional) Soft NMS minimum score threshold
     * @param[in] suppress_size            (Optional) Filter out boxes based on their size. Defaults to false
     * @param[in] min_size                 (Optional) Boxes smaller than min_size will be filtered out. Defaults to 1
     * @param[in] im_width                 (Optional) Boxes whose centers (on the x axis) are beyond im_width will be filtered. Defaults to 1
     * @param[in] im_height                (Optional) Boxes whose centers (on the y axis) are beyond im_height will be filtered. Defaults to 1
     */
    BoxNMSLimitInfo(float score_thresh = 0.05f, float nms = 0.3f,
                    int detections = 100, bool soft_nms_enabled = false,
                    NMSType soft_nms_method = NMSType::LINEAR,
                    float soft_nms_sigma = 0.5f, float soft_nms_min_score_thres = 0.001f, bool suppress_size = false, float min_size = 1.0f, float im_width = 1.0f, float im_height = 1.0f)
        : _score_thresh(score_thresh), _nms(nms), _detections_per_im(detections), _soft_nms_enabled(soft_nms_enabled), _soft_nms_method(soft_nms_method), _soft_nms_sigma(soft_nms_sigma),
          _soft_nms_min_score_thres(soft_nms_min_score_thres), _suppress_size(suppress_size), _min_size(min_size), _im_width(im_width), _im_height(im_height)
    {
    }
    /** Get the score threshold */
    float score_thresh() const
    {
        return _score_thresh;
    }
    /** Get the NMS */
    float nms() const
    {
        return _nms;
    }
    /** Get the number of detections */
    int detections_per_im() const
    {
        return _detections_per_im;
    }
    /** Check if soft NMS is enabled */
    bool soft_nms_enabled() const
    {
        return _soft_nms_enabled;
    }
    /** Get soft NMS method */
    NMSType soft_nms_method() const
    {
        return _soft_nms_method;
    }
    /** Get soft NMS sigma */
    float soft_nms_sigma() const
    {
        return _soft_nms_sigma;
    }
    /** Get soft nms min score threshold */
    float soft_nms_min_score_thres() const
    {
        return _soft_nms_min_score_thres;
    }
    /** Get if NMS will suppress boxes based on their size/position */
    bool suppress_size() const
    {
        return _suppress_size;
    }
    /** Get size suppression threshold */
    float min_size() const
    {
        return _min_size;
    }
    /** Get image width (NMS may suppress boxes whose center sits beyond the image width) */
    float im_width() const
    {
        return _im_width;
    }
    /** Get image height (NMS may suppress boxes whose center sits beyond the image height) */
    float im_height() const
    {
        return _im_height;
    }

private:
    float   _score_thresh;
    float   _nms;
    int     _detections_per_im;
    bool    _soft_nms_enabled;
    NMSType _soft_nms_method;
    float   _soft_nms_sigma;
    float   _soft_nms_min_score_thres;
    bool    _suppress_size;
    float   _min_size;
    float   _im_width;
    float   _im_height;
};

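/* Usage sketch (illustrative, not part of the API): enabling Gaussian soft-NMS while keeping the
 * remaining parameters at their defaults.
 *
 *   BoxNMSLimitInfo nms_info(0.05f,  // score threshold
 *                            0.3f,   // NMS overlap threshold
 *                            100,    // detections per image
 *                            true,   // enable soft-NMS
 *                            NMSType::GAUSSIAN);
 */
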
/** Padding and stride information class */
class PadStrideInfo
{
public:
    /** Constructor
     *
     * @param[in] stride_x (Optional) Stride, in elements, across x. Defaults to 1.
     * @param[in] stride_y (Optional) Stride, in elements, across y. Defaults to 1.
     * @param[in] pad_x    (Optional) Padding, in elements, across x. Defaults to 0.
     * @param[in] pad_y    (Optional) Padding, in elements, across y. Defaults to 0.
     * @param[in] round    (Optional) Dimensions rounding. Defaults to @ref DimensionRoundingType::FLOOR.
     */
    PadStrideInfo(unsigned int stride_x = 1, unsigned int stride_y = 1,
                  unsigned int pad_x = 0, unsigned int pad_y = 0,
                  DimensionRoundingType round = DimensionRoundingType::FLOOR)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_x),
          _pad_top(pad_y),
          _pad_right(pad_x),
          _pad_bottom(pad_y),
          _round_type(round)
    {
    }
    /** Constructor
     *
     * @param[in] stride_x   Stride, in elements, across x.
     * @param[in] stride_y   Stride, in elements, across y.
     * @param[in] pad_left   Padding across x on the left, in elements.
     * @param[in] pad_right  Padding across x on the right, in elements.
     * @param[in] pad_top    Padding across y on the top, in elements.
     * @param[in] pad_bottom Padding across y on the bottom, in elements.
     * @param[in] round      Dimensions rounding.
     */
    PadStrideInfo(unsigned int stride_x, unsigned int stride_y,
                  unsigned int pad_left, unsigned int pad_right,
                  unsigned int pad_top, unsigned int pad_bottom,
                  DimensionRoundingType round)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_left),
          _pad_top(pad_top),
          _pad_right(pad_right),
          _pad_bottom(pad_bottom),
          _round_type(round)
    {
    }
    /** Get the stride.
     *
     * @return a pair: stride x, stride y.
     */
    std::pair<unsigned int, unsigned int> stride() const
    {
        return _stride;
    }
    /** Check whether the padding is symmetric.
     *
     * @return True if the padding is symmetric.
     */
    bool padding_is_symmetric() const
    {
        return (_pad_left == _pad_right) && (_pad_top == _pad_bottom);
    }
    /** Get the padding.
     *
     * @note This should only be used when the padding is symmetric.
     *
     * @return a pair: padding left/right, padding top/bottom
     */
    std::pair<unsigned int, unsigned int> pad() const
    {
        // This accessor should be used only when padding is symmetric
        ARM_COMPUTE_ERROR_ON(!padding_is_symmetric());
        return std::make_pair(_pad_left, _pad_top);
    }

    /** Get the left padding */
    unsigned int pad_left() const
    {
        return _pad_left;
    }
    /** Get the right padding */
    unsigned int pad_right() const
    {
        return _pad_right;
    }
    /** Get the top padding */
    unsigned int pad_top() const
    {
        return _pad_top;
    }
    /** Get the bottom padding */
    unsigned int pad_bottom() const
    {
        return _pad_bottom;
    }

    /** Get the rounding type */
    DimensionRoundingType round() const
    {
        return _round_type;
    }

    /** Check whether this has any padding */
    bool has_padding() const
    {
        return (_pad_left != 0 || _pad_top != 0 || _pad_right != 0 || _pad_bottom != 0);
    }

private:
    std::pair<unsigned int, unsigned int> _stride;
    unsigned int _pad_left;
    unsigned int _pad_top;
    unsigned int _pad_right;
    unsigned int _pad_bottom;

    DimensionRoundingType _round_type;
};

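/* Illustrative sketch (not part of the API): how the stride, padding and rounding carried by a
 * PadStrideInfo determine the output extent of a convolution/pooling window along one dimension.
 * The helper name is hypothetical; the library has its own shape-calculator utilities.
 *
 *   inline unsigned int example_output_dim(unsigned int in, unsigned int kernel,
 *                                          unsigned int pad_before, unsigned int pad_after,
 *                                          unsigned int stride, DimensionRoundingType round)
 *   {
 *       const float num = static_cast<float>(in + pad_before + pad_after - kernel) / stride + 1.f;
 *       return (round == DimensionRoundingType::FLOOR) ? static_cast<unsigned int>(std::floor(num))
 *                                                      : static_cast<unsigned int>(std::ceil(num));
 *   }
 *
 *   // e.g. 7-wide input, 3-wide kernel, stride 2, no padding -> 3; with 1 element of padding on each side -> 4
 */
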
/** Fully connected layer info */
struct FullyConnectedLayerInfo
{
    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */

    /** Sets the weights trained data layout
     *
     * @param[in] layout Data layout that the weights were trained with
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_weights_trained_layout(DataLayout layout)
    {
        weights_trained_layout = layout;
        return *this;
    }
    /** Sets the transpose weights flag
     *
     * @param[in] should_transpose_weights Boolean flag indicating if weights should be transposed
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_transpose_weights(bool should_transpose_weights)
    {
        transpose_weights = should_transpose_weights;
        return *this;
    }
};

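/* Usage sketch (illustrative, not part of the API): the setters return *this, so they can be
 * chained when configuring a fully connected layer.
 *
 *   FullyConnectedLayerInfo fc_info;
 *   fc_info.set_weights_trained_layout(DataLayout::NHWC)
 *          .set_transpose_weights(false);
 */
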
/** PriorBox layer info */
class PriorBoxLayerInfo final
{
public:
    /** Default Constructor */
    PriorBoxLayerInfo()
        : _min_sizes(),
          _variances(),
          _offset(),
          _flip(true),
          _clip(false),
          _max_sizes(),
          _aspect_ratios(),
          _img_size(),
          _steps()
    {
    }
    /** Constructor
     *
     * @param[in] min_sizes     Min sizes vector.
     * @param[in] variances     Variances vector.
     * @param[in] offset        Offset value.
     * @param[in] flip          (Optional) Flip the aspect ratios.
     * @param[in] clip          (Optional) Clip coordinates so that they're within [0,1].
     * @param[in] max_sizes     (Optional) Max sizes vector.
     * @param[in] aspect_ratios (Optional) Aspect ratios of the boxes.
     * @param[in] img_size      (Optional) Image size.
     * @param[in] steps         (Optional) Step values.
     */
    PriorBoxLayerInfo(const std::vector<float> &min_sizes, const std::vector<float> &variances, float offset, bool flip = true, bool clip = false,
                      const std::vector<float> &max_sizes = {}, const std::vector<float> &aspect_ratios = {},
                      const Coordinates2D &img_size = Coordinates2D{ 0, 0 }, const std::array<float, 2> &steps = { { 0.f, 0.f } })
        : _min_sizes(min_sizes),
          _variances(variances),
          _offset(offset),
          _flip(flip),
          _clip(clip),
          _max_sizes(max_sizes),
          _aspect_ratios(),
          _img_size(img_size),
          _steps(steps)
    {
        _aspect_ratios.push_back(1.);
        for(unsigned int i = 0; i < aspect_ratios.size(); ++i)
        {
            float ar            = aspect_ratios[i];
            bool  already_exist = false;
            for(auto ar_new : _aspect_ratios)
            {
                if(fabs(ar - ar_new) < 1e-6)
                {
                    already_exist = true;
                    break;
                }
            }
            if(!already_exist)
            {
                _aspect_ratios.push_back(ar);
                if(flip)
                {
                    _aspect_ratios.push_back(1.f / ar);
                }
            }
        }
    }
    /** Get min sizes. */
    std::vector<float> min_sizes() const
    {
        return _min_sizes;
    }
    /** Get variances. */
    std::vector<float> variances() const
    {
        return _variances;
    }
    /** Get the step coordinates */
    std::array<float, 2> steps() const
    {
        return _steps;
    }
    /** Get the image size coordinates */
    Coordinates2D img_size() const
    {
        return _img_size;
    }
    /** Get the offset */
    float offset() const
    {
        return _offset;
    }
    /** Get the flip value */
    bool flip() const
    {
        return _flip;
    }
    /** Get the clip value */
    bool clip() const
    {
        return _clip;
    }
    /** Get max sizes. */
    std::vector<float> max_sizes() const
    {
        return _max_sizes;
    }
    /** Get aspect ratios. */
    std::vector<float> aspect_ratios() const
    {
        return _aspect_ratios;
    }

private:
    std::vector<float>   _min_sizes;
    std::vector<float>   _variances;
    float                _offset;
    bool                 _flip;
    bool                 _clip;
    std::vector<float>   _max_sizes;
    std::vector<float>   _aspect_ratios;
    Coordinates2D        _img_size;
    std::array<float, 2> _steps;
};

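/* Usage sketch (illustrative, not part of the API): with flip enabled the constructor expands the
 * aspect-ratio list with the reciprocal of every ratio, always starting from 1.0.
 *
 *   PriorBoxLayerInfo prior_info({ 30.f }, { 0.1f, 0.1f, 0.2f, 0.2f }, 0.5f,
 *                                true, false,          // flip, clip
 *                                { 60.f }, { 2.f, 3.f });
 *   // prior_info.aspect_ratios() == { 1.0, 2.0, 0.5, 3.0, 1.0/3.0 }
 */
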
/** Available Detection Output code types */
enum class DetectionOutputLayerCodeType
{
    CORNER,      /**< Use box corners */
    CENTER_SIZE, /**< Use box centers and size */
    CORNER_SIZE, /**< Use box corners and size */
    TF_CENTER    /**< Use box centers and size but flip x and y co-ordinates */
};

/** Detection Output layer info */
class DetectionOutputLayerInfo final
{
public:
    /** Default Constructor */
    DetectionOutputLayerInfo()
        : _num_classes(),
          _share_location(),
          _code_type(DetectionOutputLayerCodeType::CORNER),
          _keep_top_k(),
          _nms_threshold(),
          _top_k(),
          _background_label_id(),
          _confidence_threshold(),
          _variance_encoded_in_target(false),
          _eta(),
          _num_loc_classes()
    {
        _num_loc_classes = _share_location ? 1 : _num_classes;
    }
    /** Constructor
     *
     * @param[in] num_classes                Number of classes to be predicted.
     * @param[in] share_location             If true, bounding boxes are shared among different classes.
     * @param[in] code_type                  Type of coding method for bbox.
     * @param[in] keep_top_k                 Number of total bounding boxes to be kept per image after NMS step.
     * @param[in] nms_threshold              Threshold to be used in NMS.
     * @param[in] top_k                      (Optional) Number of boxes per image with top confidence scores that are fed into the NMS algorithm. Default set to -1.
     * @param[in] background_label_id        (Optional) Background label ID. If there is no background class, set it as -1.
     * @param[in] confidence_threshold       (Optional) Only consider detections whose confidences are larger than a threshold. Default set to -FLT_MAX.
     * @param[in] variance_encoded_in_target (Optional) If true, variance is encoded in target. Otherwise we need to adjust the predicted offset accordingly. Default set to false.
     * @param[in] eta                        (Optional) Eta.
     */
    DetectionOutputLayerInfo(int num_classes, bool share_location, DetectionOutputLayerCodeType code_type, int keep_top_k, float nms_threshold, int top_k = -1, int background_label_id = -1,
                             float confidence_threshold = std::numeric_limits<float>::lowest(), bool variance_encoded_in_target = false, float eta = 1)
        : _num_classes(num_classes),
          _share_location(share_location),
          _code_type(code_type),
          _keep_top_k(keep_top_k),
          _nms_threshold(nms_threshold),
          _top_k(top_k),
          _background_label_id(background_label_id),
          _confidence_threshold(confidence_threshold),
          _variance_encoded_in_target(variance_encoded_in_target),
          _eta(eta),
          _num_loc_classes()
    {
        _num_loc_classes = _share_location ? 1 : _num_classes;
    }
    /** Get num classes. */
    int num_classes() const
    {
        return _num_classes;
    }
    /** Get share location. */
    bool share_location() const
    {
        return _share_location;
    }
    /** Get detection output code type. */
    DetectionOutputLayerCodeType code_type() const
    {
        return _code_type;
    }
    /** Get if variance encoded in target. */
    bool variance_encoded_in_target() const
    {
        return _variance_encoded_in_target;
    }
    /** Get the number of total bounding boxes to be kept per image. */
    int keep_top_k() const
    {
        return _keep_top_k;
    }
    /** Get nms threshold. */
    float nms_threshold() const
    {
        return _nms_threshold;
    }
    /** Get eta. */
    float eta() const
    {
        return _eta;
    }
    /** Get background label ID. */
    int background_label_id() const
    {
        return _background_label_id;
    }
    /** Get confidence threshold. */
    float confidence_threshold() const
    {
        return _confidence_threshold;
    }
    /** Get top K. */
    int top_k() const
    {
        return _top_k;
    }
    /** Get number of location classes. */
    int num_loc_classes() const
    {
        return _num_loc_classes;
    }

private:
    int                          _num_classes;
    bool                         _share_location;
    DetectionOutputLayerCodeType _code_type;
    int                          _keep_top_k;
    float                        _nms_threshold;
    int                          _top_k;
    int                          _background_label_id;
    float                        _confidence_threshold;
    bool                         _variance_encoded_in_target;
    float                        _eta;
    int                          _num_loc_classes;
};

/** Pooling Layer Information class */
class PoolingLayerInfo
{
public:
    /** Default Constructor */
    PoolingLayerInfo()
        : _pool_type(PoolingType::MAX), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              unsigned int  pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(Size2D(pool_size, pool_size)), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              Size2D        pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @note This constructor is used for global pooling
     *
     * @param[in] pool_type Pooling type @ref PoolingType.
     */
    explicit PoolingLayerInfo(PoolingType pool_type)
        : _pool_type(pool_type), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
    {
    }
    /** Get the pooling type */
    PoolingType pool_type() const
    {
        return _pool_type;
    }
    /** Get the pooling size */
    const Size2D &pool_size() const
    {
        return _pool_size;
    }
    /** Get the padding and stride */
    PadStrideInfo pad_stride_info() const
    {
        return _pad_stride_info;
    }
    /** Check if padding is excluded in calculations */
    bool exclude_padding() const
    {
        return _exclude_padding;
    }
    /** Check if is global pooling */
    bool is_global_pooling() const
    {
        return _is_global_pooling;
    }

private:
    PoolingType   _pool_type;
    Size2D        _pool_size;
    PadStrideInfo _pad_stride_info;
    bool          _exclude_padding;
    bool          _is_global_pooling;
};

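/* Usage sketch (illustrative, not part of the API): a 3x3 max pooling with stride 2 and 1 element
 * of padding, and a global average pooling that spans the whole input plane.
 *
 *   PoolingLayerInfo max_pool(PoolingType::MAX, 3, PadStrideInfo(2, 2, 1, 1));
 *   PoolingLayerInfo global_avg(PoolingType::AVG); // is_global_pooling() == true
 */
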
/** ROI Pooling Layer Information class */
class ROIPoolingLayerInfo final
{
public:
    /** Constructor
     *
     * @param[in] pooled_width   Pooled width of the layer.
     * @param[in] pooled_height  Pooled height of the layer.
     * @param[in] spatial_scale  Spatial scale to be applied to the ROI coordinates and dimensions.
     * @param[in] sampling_ratio Number of samples to include in each pooling region (if set to zero, ceil(roi_dims/pooling_dims) samples are used).
     */
    ROIPoolingLayerInfo(unsigned int pooled_width, unsigned int pooled_height, float spatial_scale, unsigned int sampling_ratio = 0)
        : _pooled_width(pooled_width), _pooled_height(pooled_height), _spatial_scale(spatial_scale), _sampling_ratio(sampling_ratio)
    {
    }
    /** Get the pooled width of the layer */
    unsigned int pooled_width() const
    {
        return _pooled_width;
    }
    /** Get the pooled height of the layer */
    unsigned int pooled_height() const
    {
        return _pooled_height;
    }
    /** Get the spatial scale */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get sampling ratio */
    unsigned int sampling_ratio() const
    {
        return _sampling_ratio;
    }

private:
    unsigned int _pooled_width;
    unsigned int _pooled_height;
    float        _spatial_scale;
    unsigned int _sampling_ratio;
};

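/* Usage sketch (illustrative, not part of the API): pooling every ROI to a 7x7 grid for a feature
 * map that is 1/16th the size of the original image (a typical Faster R-CNN style setting).
 *
 *   ROIPoolingLayerInfo roi_info(7, 7, 1.f / 16.f); // sampling_ratio defaults to 0 (adaptive)
 */
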
/** Generate Proposals Information class */
class GenerateProposalsInfo
{
public:
    /** Constructor
     *
     * @param[in] im_width       Width of the original image
     * @param[in] im_height      Height of the original image
     * @param[in] im_scale       Scale applied to the original image
     * @param[in] spatial_scale  (Optional) Scale applied to the feature map. Defaults to 1.0
     * @param[in] pre_nms_topN   (Optional) Number of the best scores to be selected from the transformations. Defaults to 6000.
     * @param[in] post_nms_topN  (Optional) Number of the best scores to be selected from the NMS operation. Defaults to 300.
     * @param[in] nms_thres      (Optional) NMS overlap threshold. Defaults to 0.7.
     * @param[in] min_size       (Optional) Size used to validate the anchors produced. Defaults to 16.
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region of interest). Defaults to 4.
     */
    GenerateProposalsInfo(float im_width, float im_height, float im_scale, float spatial_scale = 1.0, int pre_nms_topN = 6000, int post_nms_topN = 300, float nms_thres = 0.7, float min_size = 16.0,
                          size_t values_per_roi = 4)
        : _im_height(im_height), _im_width(im_width), _im_scale(im_scale), _spatial_scale(spatial_scale), _pre_nms_topN(pre_nms_topN), _post_nms_topN(post_nms_topN), _nms_thres(nms_thres),
          _min_size(min_size), _values_per_roi(values_per_roi)
    {
    }

    /* Get the original height */
    float im_height() const
    {
        return _im_height;
    }
    /* Get the original width */
    float im_width() const
    {
        return _im_width;
    }
    /* Get the image scale */
    float im_scale() const
    {
        return _im_scale;
    }
    /* Get the value of how many best scores to select (before NMS) */
    int pre_nms_topN() const
    {
        return _pre_nms_topN;
    }
    /* Get the value of how many best scores to select (after NMS) */
    int post_nms_topN() const
    {
        return _post_nms_topN;
    }
    /* Get the NMS overlap threshold */
    float nms_thres() const
    {
        return _nms_thres;
    }
    /* Get the minimal size */
    float min_size() const
    {
        return _min_size;
    }
    /* Get the spatial scale to be applied to the feature maps */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /* Get the values used to represent a ROI (Region of interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _im_height;
    float  _im_width;
    float  _im_scale;
    float  _spatial_scale;
    int    _pre_nms_topN;
    int    _post_nms_topN;
    float  _nms_thres;
    float  _min_size;
    size_t _values_per_roi;
};

/** ComputeAnchors information class */
class ComputeAnchorsInfo
{
public:
    /** Constructor
     *
     * @param[in] feat_width     Feature map width
     * @param[in] feat_height    Feature map height
     * @param[in] spatial_scale  Feature map scale
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region Of Interest). Defaults to 4
     */
    ComputeAnchorsInfo(float feat_width, float feat_height, float spatial_scale, size_t values_per_roi = 4)
        : _feat_height(feat_height),
          _feat_width(feat_width),
          _spatial_scale(spatial_scale),
          _values_per_roi(values_per_roi)
    {
    }

    /* Get the height of the feature map */
    float feat_height() const
    {
        return _feat_height;
    }

    /* Get the width of the feature map */
    float feat_width() const
    {
        return _feat_width;
    }

    /* Get the scale of the feature map */
    float spatial_scale() const
    {
        return _spatial_scale;
    }

    /* Get the values used to represent a ROI (Region Of Interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _feat_height;
    float  _feat_width;
    float  _spatial_scale;
    size_t _values_per_roi;
};

giuros01c04a0e82018-10-03 12:44:35 +01001401/** Bounding Box Transform information class */
giuros01d696cb62018-11-16 10:39:59 +00001402class BoundingBoxTransformInfo final
giuros01c04a0e82018-10-03 12:44:35 +01001403{
1404public:
1405 /** Constructor
1406 *
giuros01d696cb62018-11-16 10:39:59 +00001407 * @param[in] img_width Width of the original image
1408 * @param[in] img_height Height, of the original image
1409 * @param[in] scale Scale of the original image
1410 * @param[in] apply_scale (Optional)Re-apply scaling after transforming the boxes. Defaults to false
1411 * @param[in] weights (Optional)Weights [wx, wy, ww, wh] for the deltas. Defaults to all ones
1412 * @param[in] correct_transform_coords (Optional)Correct bounding box transform coordinates. Defaults to false
1413 * @param[in] bbox_xform_clip (Optional)Minimum bounding box width and height after bounding box transformation in log-space. Defaults to log(1000/16)
giuros01c04a0e82018-10-03 12:44:35 +01001414 */
giuros01d696cb62018-11-16 10:39:59 +00001415 BoundingBoxTransformInfo(float img_width, float img_height, float scale, bool apply_scale = false, const std::array<float, 4> weights = { { 1.f, 1.f, 1.f, 1.f } }, bool correct_transform_coords =
1416 false,
1417 float bbox_xform_clip =
1418 4.135166556742356f)
1419 : _img_width(img_width), _img_height(img_height), _scale(scale), _apply_scale(apply_scale), _correct_transform_coords(correct_transform_coords), _weights(weights), _bbox_xform_clip(bbox_xform_clip)
giuros01c04a0e82018-10-03 12:44:35 +01001420 {
1421 }
1422
1423 std::array<float, 4> weights() const
1424 {
1425 return _weights;
1426 }
1427
1428 float bbox_xform_clip() const
1429 {
1430 return _bbox_xform_clip;
1431 }
1432
1433 float img_height() const
1434 {
1435 return _img_height;
1436 }
1437
1438 float img_width() const
1439 {
1440 return _img_width;
1441 }
1442
1443 float scale() const
1444 {
1445 return _scale;
1446 }
1447
1448 bool apply_scale() const
1449 {
1450 return _apply_scale;
1451 }
1452
giuros01d696cb62018-11-16 10:39:59 +00001453 bool correct_transform_coords() const
1454 {
1455 return _correct_transform_coords;
1456 }
1457
giuros01c04a0e82018-10-03 12:44:35 +01001458private:
1459 float _img_width;
1460 float _img_height;
1461 float _scale;
1462 bool _apply_scale;
giuros01d696cb62018-11-16 10:39:59 +00001463 bool _correct_transform_coords;
giuros01c04a0e82018-10-03 12:44:35 +01001464 std::array<float, 4> _weights;
1465 float _bbox_xform_clip;
1466};
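// Illustrative usage sketch (not part of the library): transform information for an 800x600
// image at unit scale, keeping the default delta weights and the default log-space clamp of
// log(1000/16). The helper name and the values are hypothetical.
inline BoundingBoxTransformInfo example_bbox_transform_info()
{
    return BoundingBoxTransformInfo(800.f /* img_width */, 600.f /* img_height */, 1.f /* scale */);
}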
1467
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001468/** Activation Layer Information class */
1469class ActivationLayerInfo
1470{
1471public:
1472 /** Available activation functions */
1473 enum class ActivationFunction
1474 {
Georgios Pinitas64ebe5b2017-09-01 17:44:24 +01001475 LOGISTIC, /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
1476 TANH, /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
1477 RELU, /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
1478 BOUNDED_RELU, /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
1479 LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
Manuel Bottini581c8982019-02-07 10:31:57 +00001480 LEAKY_RELU, /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} \alpha x & \quad \text{if } x \text{ < 0}\\ x & \quad \text{if } x \geq \text{ 0 } \end{cases} \f$ ) */
Georgios Pinitas64ebe5b2017-09-01 17:44:24 +01001481 SOFT_RELU, /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
1482 ABS, /**< Absolute ( \f$ f(x)= |x| \f$ ) */
1483 SQUARE, /**< Square ( \f$ f(x)= x^2 \f$ )*/
1484 SQRT, /**< Square root ( \f$ f(x) = \sqrt{x} \f$ )*/
Usama Arif6a98a6e2019-05-10 17:07:27 +01001485 LINEAR, /**< Linear ( \f$ f(x)= ax + b \f$ ) */
1486 IDENTITY /**< Identity ( \f$ f(x)= x \f$ ) */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001487 };
1488
Giorgio Arena11674872018-02-07 15:38:12 +00001489 ActivationLayerInfo() = default;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001490 /** Default Constructor
1491 *
1492 * @param[in] f The activation function to use.
1493 * @param[in] a (Optional) The alpha parameter used by some activation functions
Georgios Pinitas64ebe5b2017-09-01 17:44:24 +01001494 * (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LEAKY_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
1495 * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001496 */
1497 ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
Giorgio Arena11674872018-02-07 15:38:12 +00001498 : _act(f), _a(a), _b(b), _enabled(true)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001499 {
1500 }
Alex Gildayc357c472018-03-21 13:54:09 +00001501 /** Get the type of activation function */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001502 ActivationFunction activation() const
1503 {
1504 return _act;
1505 }
Alex Gildayc357c472018-03-21 13:54:09 +00001506 /** Get the alpha value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001507 float a() const
1508 {
1509 return _a;
1510 }
Alex Gildayc357c472018-03-21 13:54:09 +00001511 /** Get the beta value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001512 float b() const
1513 {
1514 return _b;
1515 }
Alex Gildayc357c472018-03-21 13:54:09 +00001516 /** Check if initialised */
Giorgio Arena11674872018-02-07 15:38:12 +00001517 bool enabled() const
1518 {
1519 return _enabled;
1520 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001521
1522private:
Usama Arif6a98a6e2019-05-10 17:07:27 +01001523 ActivationFunction _act = { ActivationLayerInfo::ActivationFunction::IDENTITY };
Giorgio Arena11674872018-02-07 15:38:12 +00001524 float _a = {};
1525 float _b = {};
1526 bool _enabled = { false };
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001527};
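// Illustrative usage sketch (not part of the library): a bounded rectifier configured as
// ReLU6, i.e. f(x) = min(6, max(0, x)). Alpha carries the upper bound for BOUNDED_RELU and
// beta is unused. The helper name is hypothetical.
inline ActivationLayerInfo example_relu6_activation_info()
{
    return ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f /* a */);
}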
1528
1529/** Normalization Layer Information class */
1530class NormalizationLayerInfo
1531{
1532public:
1533 /** Default Constructor
1534 *
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001535 * @param[in] type The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NormType::CROSS_MAP
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001536 * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5.
Georgios Pinitas41caa622017-11-16 14:37:08 +00001537 * @param[in] alpha (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001.
1538 * @param[in] beta (Optional) Beta parameter used by normalization equation. Defaults to 0.5.
1539 * @param[in] kappa (Optional) Kappa parameter used by the [Krizhevsky 2012] Across Channel Local Brightness Normalization equation.
1540 * @param[in] is_scaled (Optional) Boolean that specifies whether alpha is scaled by the normalization size.
1541 * Should be false to follow [Krizhevsky 2012].
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001542 */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001543 NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true)
1544 : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001545 {
1546 }
Alex Gildayc357c472018-03-21 13:54:09 +00001547 /** Get the normalization type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001548 NormType type() const
1549 {
1550 return _type;
1551 }
Alex Gildayc357c472018-03-21 13:54:09 +00001552 /** Get the normalization size */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001553 uint32_t norm_size() const
1554 {
1555 return _norm_size;
1556 }
Alex Gildayc357c472018-03-21 13:54:09 +00001557 /** Get the alpha value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001558 float alpha() const
1559 {
1560 return _alpha;
1561 }
Alex Gildayc357c472018-03-21 13:54:09 +00001562 /** Get the beta value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001563 float beta() const
1564 {
1565 return _beta;
1566 }
Alex Gildayc357c472018-03-21 13:54:09 +00001567 /** Get the kappa value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001568 float kappa() const
1569 {
1570 return _kappa;
1571 }
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001572 /** Get the is_scaled value */
1573 bool is_scaled() const
1574 {
1575 return _is_scaled;
1576 }
Alex Gildayc357c472018-03-21 13:54:09 +00001577 /** Check if normalization is cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001578 bool is_cross_map() const
1579 {
1580 return _type == NormType::CROSS_MAP;
1581 }
Alex Gildayc357c472018-03-21 13:54:09 +00001582 /** Check if normalization is not cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001583 bool is_in_map() const
1584 {
1585 return !is_cross_map();
1586 }
1587 /** Return the scaling factor of the normalization function.
1588 *
1589 * If is_scaled is set to false, the [Krizhevsky 2012] normalization scaling is used and alpha is returned as-is;
1590 * otherwise alpha is divided by the total number of elements used for the normalization.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001591 *
1592 * @return The normalization scaling factor.
1593 */
1594 float scale_coeff() const
1595 {
1596 const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001597 return (_is_scaled) ? (_alpha / size) : _alpha;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001598 }
1599
1600private:
1601 NormType _type;
1602 uint32_t _norm_size;
1603 float _alpha;
1604 float _beta;
1605 float _kappa;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001606 bool _is_scaled;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001607};
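// Illustrative usage sketch (not part of the library): a cross-map local response normalization
// following [Krizhevsky 2012], where alpha is not rescaled by the normalization size, so
// scale_coeff() returns alpha unchanged. The helper name and the values are hypothetical.
inline NormalizationLayerInfo example_alexnet_lrn_info()
{
    return NormalizationLayerInfo(NormType::CROSS_MAP, 5 /* norm_size */, 0.0001f /* alpha */, 0.75f /* beta */, 2.f /* kappa */, false /* is_scaled */);
}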
1608
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001609/** Convolution Layer Weights Information class. This class stores the necessary information to compute convolution layer when the weights are already reshaped */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001610class WeightsInfo
1611{
1612public:
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001613 /** Default constructor */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001614 WeightsInfo()
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001615 : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001616 {
1617 }
1618 /** Constructor
1619 *
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001620 * @param[in] are_reshaped True if the weights have been reshaped
1621 * @param[in] kernel_width Kernel width.
1622 * @param[in] kernel_height Kernel height.
1623 * @param[in] num_kernels Number of convolution kernels.
1624 * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001625 */
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001626 WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
1627 : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001628 {
1629 }
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001630 /** Flag which specifies if the weights tensor has been reshaped.
1631 *
1632 * @return True if the weights tensor has been reshaped
1633 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001634 bool are_reshaped() const
1635 {
1636 return _are_reshaped;
1637 };
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001638 /** Return the number of convolution kernels
1639 *
1640 * @return The number of convolution kernels
1641 */
1642 unsigned int num_kernels() const
1643 {
1644 return _num_kernels;
1645 };
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001646 /** Return the width and height of the kernel
1647 *
1648 * @return The width and height of the kernel
1649 */
1650 std::pair<unsigned int, unsigned int> kernel_size() const
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001651 {
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001652 return std::make_pair(_kernel_width, _kernel_height);
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001653 }
 /** Flag which specifies if the internal reshaped weights have to be retained (used for reconfiguration purposes) */
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001654 bool retain_internal_weights() const
1655 {
1656 return _retain_internal_weights;
1657 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001658
1659private:
1660 const bool _are_reshaped;
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001661 const unsigned int _kernel_width;
1662 const unsigned int _kernel_height;
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001663 const unsigned int _num_kernels;
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001664 const bool _retain_internal_weights;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001665};
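// Illustrative usage sketch (not part of the library): weights information describing 64
// not-yet-reshaped 3x3 convolution kernels. The helper name and the values are hypothetical.
inline WeightsInfo example_weights_info()
{
    return WeightsInfo(false /* are_reshaped */, 3U /* kernel_width */, 3U /* kernel_height */, 64U /* num_kernels */);
}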
1666
Gian Marco36a0a462018-01-12 10:21:40 +00001667/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
1668 *
Gian Marco Iodice5fc07aa2019-05-15 17:08:02 +01001669 * The matrix A can only be reshaped through @ref CLGEMMReshapeLHSMatrixKernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
1670 * Note: mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block, can optionally be set only for @ref CLGEMMReshapeLHSMatrixKernel
Gian Marco36a0a462018-01-12 10:21:40 +00001671 *
giuros018b6b4a92018-12-18 19:01:33 +00001672 * The matrix B can only be reshaped through @ref CLGEMMReshapeRHSMatrixKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
1673 * Note: mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block, can optionally be set only for @ref CLGEMMReshapeRHSMatrixKernel
Gian Marco36a0a462018-01-12 10:21:40 +00001674 *
1675 */
1676class GEMMReshapeInfo final
1677{
1678public:
1679 /** Default constructor */
1680 GEMMReshapeInfo()
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001681 : _m(1), _n(1), _k(1), _mult_transpose1xW_width(1), _mult_interleave4x4_height(1), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false)
Gian Marco36a0a462018-01-12 10:21:40 +00001682 {
1683 }
1684 /** Constructor
1685 *
1686 * @param[in] m Number of matrix A rows
1687 * @param[in] n Number of matrix B columns
1688 * @param[in] k Number of matrix A columns or matrix B rows
1689 * @param[in] mult_transpose1xW_width (Optional) Multiplication factor for the width of the 1xW transposed block
1690 * @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleaved block
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001691 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel.
1692 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001693 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1694 * to perform 1x1 convolutions with the NHWC data layout)
Gian Marco36a0a462018-01-12 10:21:40 +00001695 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001696 GEMMReshapeInfo(int m, int n, int k, int mult_transpose1xW_width = 1, int mult_interleave4x4_height = 1, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001697 : _m(m), _n(n), _k(k), _mult_transpose1xW_width(mult_transpose1xW_width), _mult_interleave4x4_height(mult_interleave4x4_height), _depth_output_gemm3d(depth_output_gemm3d),
1698 _reinterpret_input_as_3d(reinterpret_input_as_3d)
Gian Marco36a0a462018-01-12 10:21:40 +00001699 {
1700 }
1701 /** Number of matrix A rows
1702 *
1703 * @return the number of matrix A rows
1704 */
1705 int m() const
1706 {
1707 return _m;
1708 }
1709 /** Number of matrix B columns
1710 *
1711 * @return the number of matrix B columns
1712 */
1713 int n() const
1714 {
1715 return _n;
1716 }
1717 /** Number of matrix A columns or matrix B rows
1718 *
1719 * @return the number of matrix A columns or matrix B rows
1720 */
1721 int k() const
1722 {
1723 return _k;
1724 }
1725 /** Multiplication factor for the width of the 1xW transposed block
1726 *
1727 * @return the multiplication factor for the width of the 1xW transposed block
1728 */
1729 int mult_transpose1xW_width() const
1730 {
1731 return _mult_transpose1xW_width;
1732 }
1733 /** Multiplication factor for the height of the 4x4 interleaved block
1734 *
1735 * @return the multiplication factor for the height of the 4x4 interleaved block
1736 */
1737 int mult_interleave4x4_height() const
1738 {
1739 return _mult_interleave4x4_height;
1740 }
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001741 /** Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
1742 *
1743 * @note The GEMM3D kernel is used when the output has to be reinterpreted as a 3D tensor. In that case:
1744 * m = depth_output_gemm3d * output_height
1745 *
1746 * @return the depth of the output tensor to be used with the GEMM3D kernel
1747 */
1748 int depth_output_gemm3d() const
1749 {
1750 return _depth_output_gemm3d;
1751 }
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001752 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1753 *
1754 * @return True if the input tensor has to be reinterpreted as 3D tensor
1755 */
1756 bool reinterpret_input_as_3d() const
1757 {
1758 return _reinterpret_input_as_3d;
1759 };
Gian Marco36a0a462018-01-12 10:21:40 +00001760
1761private:
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001762 const int _m;
1763 const int _n;
1764 const int _k;
1765 const int _mult_transpose1xW_width;
1766 const int _mult_interleave4x4_height;
1767 const int _depth_output_gemm3d;
1768 const bool _reinterpret_input_as_3d;
Gian Marco36a0a462018-01-12 10:21:40 +00001769};
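// Illustrative usage sketch (not part of the library): reshape information for a GEMM computing
// C(64x128) = A(64x32) * B(32x128) with the default block multipliers and no 3D reinterpretation.
// The helper name and the dimensions are hypothetical.
inline GEMMReshapeInfo example_gemm_reshape_info()
{
    return GEMMReshapeInfo(64 /* m */, 128 /* n */, 32 /* k */);
}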
1770
giuros016d109962019-01-07 17:47:19 +00001771struct DepthwiseConvolutionReshapeInfo
1772{
1773 unsigned int c0{ 1 }; /**< Number of channels processed by the depth-wise convolution */
1774 bool transpose{ false }; /**< True if the block MxC0 (where M is the area of the filter i.e. KwxKh) has to be transposed */
1775};
1776
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001777/** GEMMLowp output stage type */
1778enum class GEMMLowpOutputStageType
1779{
1780 NONE, /**< No quantization to uint8 */
1781 QUANTIZE_DOWN, /**< Quantize to uint8 using an integer multiplication */
1782 QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
1783 QUANTIZE_DOWN_FLOAT /**< Quantize to uint8 using a floating point multiplication */
1784};
1785
1786/** GEMMLowp output stage info */
1787struct GEMMLowpOutputStageInfo
1788{
1789 GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
1790 int gemmlowp_offset{ 0 }; /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
1791 int gemmlowp_multiplier{ 0 }; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
1792 int gemmlowp_shift{ 0 }; /**< GEMMLowp output stage shift used for quantizing to uint8 */
1793 int gemmlowp_min_bound{ 0 }; /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
1794 int gemmlowp_max_bound{ 0 }; /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
1795};
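// Illustrative usage sketch (not part of the library): an output stage that requantizes the
// int32 GEMMLowp accumulators to QASYMM8 with a fixed point multiplication and clamps the
// result to the full uint8 range. All numeric values are hypothetical.
inline GEMMLowpOutputStageInfo example_gemmlowp_output_stage_info()
{
    GEMMLowpOutputStageInfo info;
    info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    info.gemmlowp_offset     = 10;         // output quantization offset (zero point)
    info.gemmlowp_multiplier = 1073741824; // fixed point multiplier (Q0.31 representation of 0.5)
    info.gemmlowp_shift      = 4;          // right shift applied after the multiplication
    info.gemmlowp_min_bound  = 0;
    info.gemmlowp_max_bound  = 255;
    return info;
}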
1796
Gian Marco Iodice5ba5e092018-12-06 17:13:09 +00001797/** GEMM LHS (Left Hand Side) matrix information */
1798struct GEMMLHSMatrixInfo
1799{
1800 unsigned int m0{ 1 }; /**< Number of rows processed by the matrix multiplication */
1801 unsigned int k0{ 1 }; /**< Number of partial accumulations performed by the matrix multiplication */
1802 unsigned int v0{ 1 }; /**< Number of vertical blocks of size (m0xk0) stored on the same output row */
1803 bool transpose{ true }; /**< True if the (m0xk0) block has to be transposed before being stored */
1804 bool interleave{ true }; /**< True if the v0 (m0xk0) blocks have to be interleaved in the output row */
1805};
1806
Gian Marco Iodice3b0a2652018-12-07 11:18:09 +00001807/** GEMM RHS (Right Hand Side) matrix information */
1808struct GEMMRHSMatrixInfo
1809{
1810 unsigned int n0{ 1 }; /**< Number of columns processed by the matrix multiplication */
1811 unsigned int k0{ 1 }; /**< Number of partial accumulations performed by the matrix multiplication */
1812 unsigned int h0{ 1 }; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */
1813 bool transpose{ true }; /**< True if the (k0xn0) block has to be transposed before being stored */
1814 bool interleave{ true }; /**< True if the h0 (k0xn0) blocks have to be interleaved in the output row */
1815};
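// Illustrative usage sketch (not part of the library): a matching pair of LHS/RHS block
// configurations. The m0 x k0 blocks on the left-hand side must share the same k0 as the
// k0 x n0 blocks on the right-hand side. The helper name and the block sizes are hypothetical.
inline std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> example_gemm_block_infos()
{
    GEMMLHSMatrixInfo lhs_info;
    lhs_info.m0 = 4; // rows of A processed per work-item
    lhs_info.k0 = 8; // accumulations per iteration, must match rhs_info.k0
    lhs_info.v0 = 2; // (m0 x k0) blocks stored on the same output row

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0 = 4; // columns of B processed per work-item
    rhs_info.k0 = 8;
    rhs_info.h0 = 4; // (k0 x n0) blocks stored on the same output row

    return std::make_pair(lhs_info, rhs_info);
}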
1816
Gian Marco36a0a462018-01-12 10:21:40 +00001817/** GEMM information class. This class stores the necessary information to compute GEMM functions
1818 *
1819 * This object also contains the information about how matrix A and matrix B have been reshaped
1820 *
1821 */
Chunosov5124be52017-11-22 20:42:13 +07001822class GEMMInfo
1823{
1824public:
1825 /** Default constructor */
1826 GEMMInfo()
Anthony Barbier08a45172018-11-30 17:20:26 +00001827 : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(true), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false), _retain_internal_weights(false), _gemmlowp_output_stage(),
1828 _fp_mixed_precision(false)
Chunosov5124be52017-11-22 20:42:13 +07001829 {
1830 }
1831 /** Constructor
1832 *
1833 * @param[in] is_a_reshaped True if the matrix A has been reshaped
1834 * @param[in] is_b_reshaped True if the matrix B has been reshaped
1835 * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001836 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001837 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001838 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1839 * to perform 1x1 convolutions with the NHWC data layout)
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001840 * @param[in] retain_internal_weights (Optional) Retain the weights tensor from previous run
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001841 * @param[in] gemmlowp_output_stage (Optional) GEMMLowp Output stage info
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001842 * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001843 *
Chunosov5124be52017-11-22 20:42:13 +07001844 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001845 GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001846 GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001847 : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run), _depth_output_gemm3d(depth_output_gemm3d),
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001848 _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights), _gemmlowp_output_stage(gemmlowp_output_stage), _fp_mixed_precision(fp_mixed_precision)
Chunosov5124be52017-11-22 20:42:13 +07001849 {
1850 }
1851 /** Flag which specifies if the matrix A has been reshaped
1852 *
1853 * @return True if the matrix A has been reshaped
1854 */
1855 bool is_a_reshaped() const
1856 {
1857 return _is_a_reshaped;
1858 };
1859 /** Flag which specifies if the matrix B has been reshaped
1860 *
1861 * @return True if the matrix B has been reshaped
1862 */
1863 bool is_b_reshaped() const
1864 {
1865 return _is_b_reshaped;
1866 };
1867 /** Flag which specifies if the reshape of matrix B should be executed only for the first run
1868 *
1869 * @note This flag could be set to TRUE when GEMM is used to accelerate a convolution layer
1870 *
1871 * @return True if the reshape of matrix B happens only for the first run
1872 */
1873 bool reshape_b_only_on_first_run() const
1874 {
1875 return _reshape_b_only_on_first_run;
1876 };
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001877 /** Depth of the output when GEMM output is reinterpreted as 3D tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001878 *
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001879 * @return the depth of the output tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001880 */
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001881 int depth_output_gemm3d() const
Gian Marco36a0a462018-01-12 10:21:40 +00001882 {
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001883 return _depth_output_gemm3d;
1884 };
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001885 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1886 *
1887 * @return True if the input tensor has to be reinterpreted as 3D tensor
1888 */
1889 bool reinterpret_input_as_3d() const
1890 {
1891 return _reinterpret_input_as_3d;
1892 };
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001893 /** Flag which specifies if the weights tensor has to be retained from previous run
1894 *
1895 * @return True if the weights tensor has to be retained
1896 */
1897 bool retain_internal_weights() const
1898 {
1899 return _retain_internal_weights;
1900 };
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001901 /** GEMMLowp output stage
1902 *
1903 * @return the GEMMLowp output stage info
1904 */
1905 GEMMLowpOutputStageInfo gemmlowp_output_stage() const
1906 {
1907 return _gemmlowp_output_stage;
1908 };
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001909 /** Flag which specifies if a wider accumulator should be used.
1910 *
1911 * @return True if a wider accumulator has to be used
1912 */
1913 bool fp_mixed_precision() const
1914 {
1915 return _fp_mixed_precision;
1916 };
Chunosov5124be52017-11-22 20:42:13 +07001917
1918private:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001919 const bool _is_a_reshaped;
1920 const bool _is_b_reshaped;
1921 const bool _reshape_b_only_on_first_run;
1922 const int _depth_output_gemm3d;
1923 const bool _reinterpret_input_as_3d;
1924 const bool _retain_internal_weights;
1925 const GEMMLowpOutputStageInfo _gemmlowp_output_stage;
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001926 const bool _fp_mixed_precision;
Chunosov5124be52017-11-22 20:42:13 +07001927};
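// Illustrative usage sketch (not part of the library): GEMM information as it is typically set
// up when GEMM backs a convolution layer, where matrix B (the reshaped weights) is constant
// and therefore only needs to be reshaped on the first run. The helper name is hypothetical.
inline GEMMInfo example_convolution_gemm_info()
{
    return GEMMInfo(false /* is_a_reshaped */, false /* is_b_reshaped */, true /* reshape_b_only_on_first_run */);
}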
1928
Gian Marco Iodice247f52c2018-03-22 11:24:56 +00001929/** Winograd information */
1930struct WinogradInfo
1931{
1932 /** Default constructor
1933 *
1934 * @param[in] output_tile_sz Width and height of the output tile
1935 * @param[in] kernel_sz Width and height of the kernel
1936 * @param[in] input_dims Width and height of the input tensor before the convolution is applied
1937 * @param[in] conv_info Convolution info (Pads, strides)
1938 * @param[in] data_layout Data layout to use for the output tensor once the convolution has been applied
1939 */
1940 WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
1941 : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
1942 {
1943 }
1944
1945 Size2D output_tile_size{}; /**< Width and height of the output tile */
1946 Size2D kernel_size{}; /**< Width and height of the kernel*/
1947 Size2D input_dimensions{}; /**< Width and height of the input tensor before the convolution is applied */
1948 PadStrideInfo convolution_info{}; /**< Convolution info (Pads, strides,...) */
1949 DataLayout output_data_layout{ DataLayout::NCHW }; /**< Data layout to use for the output tensor once the convolution has been applied (NCHW or NHWC) */
1950};
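// Illustrative usage sketch (not part of the library): Winograd F(2x2, 3x3) information for a
// 224x224 input convolved with a 3x3 kernel, stride 1 and no padding, producing an NCHW output.
// The helper name and the dimensions are hypothetical.
inline WinogradInfo example_winograd_f2x2_3x3_info()
{
    return WinogradInfo(Size2D(2U, 2U) /* output_tile_sz */, Size2D(3U, 3U) /* kernel_sz */,
                        Size2D(224U, 224U) /* input_dims */, PadStrideInfo(1, 1, 0, 0) /* conv_info */,
                        DataLayout::NCHW);
}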
1951
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001952/** IO formatting information class*/
1953struct IOFormatInfo
1954{
1955 /** Precision type used when printing floating point numbers */
1956 enum class PrecisionType
1957 {
1958 Default, /**< Default precision to the one that the current stream has */
1959 Custom, /**< Custom precision specified by the user using the precision parameter */
1960 Full /**< The maximum precision of the floating point representation */
1961 };
1962
1963 /** Specifies the area to be printed, used by Tensor objects */
1964 enum class PrintRegion
1965 {
1966 ValidRegion, /**< Prints the valid region of the Tensor object */
1967 NoPadding, /**< Prints the Tensor object without the padding */
1968 Full /**< Print the tensor object including padding */
1969 };
1970
Alex Gildayc357c472018-03-21 13:54:09 +00001971 /** Construct a set of IO formatting information.
1972 *
1973 * @param[in] print_region Area to be printed. Used by Tensor objects. Default: ValidRegion.
1974 * @param[in] precision_type Precision type for floating point numbers. Default: stream default.
1975 * @param[in] precision Precision value for float point numbers. Default: 10.
1976 * @param[in] align_columns Whether to align columns when printed. Default: true.
1977 * @param[in] element_delim Delimeter between elements. Default: " ".
1978 * @param[in] row_delim Delimenter between rows. Default: "\n".
1979 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001980 IOFormatInfo(PrintRegion print_region = PrintRegion::ValidRegion,
1981 PrecisionType precision_type = PrecisionType::Default,
1982 unsigned int precision = 10,
1983 bool align_columns = true,
1984 std::string element_delim = " ",
1985 std::string row_delim = "\n")
1986 : print_region(print_region),
1987 precision_type(precision_type),
1988 precision(precision),
1989 element_delim(element_delim),
1990 row_delim(row_delim),
1991 align_columns(align_columns)
1992 {
1993 }
1994
Alex Gildayc357c472018-03-21 13:54:09 +00001995 /** Area to be printed by Tensor objects */
1996 PrintRegion print_region;
1997 /** Floating point precision type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001998 PrecisionType precision_type;
Alex Gildayc357c472018-03-21 13:54:09 +00001999 /** Floating point precision */
2000 unsigned int precision;
2001 /** Element delimiter */
2002 std::string element_delim;
2003 /** Row delimiter */
2004 std::string row_delim;
2005 /** Align columns */
2006 bool align_columns;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01002007};
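// Illustrative usage sketch (not part of the library): formatting information that prints the
// full tensor, padding included, at the maximum floating point precision with comma-separated
// elements. The helper name is hypothetical.
inline IOFormatInfo example_full_print_format()
{
    return IOFormatInfo(IOFormatInfo::PrintRegion::Full, IOFormatInfo::PrecisionType::Full,
                        10 /* precision (unused when PrecisionType::Full is requested) */,
                        true /* align_columns */, ", " /* element_delim */, "\n" /* row_delim */);
}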
Georgios Pinitasd8734b52017-12-22 15:27:52 +00002008} // namespace arm_compute
Anthony Barbier6ff3b192017-09-04 18:44:23 +01002009#endif /* __ARM_COMPUTE_TYPES_H__ */