blob: 2c17f273a5db2bb518423991645d4954564a036d [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
Manuel Bottinicc5171b2019-01-09 17:04:39 +00002 * Copyright (c) 2016-2019 ARM Limited.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_TYPES_H__
25#define __ARM_COMPUTE_TYPES_H__
26
27#include "arm_compute/core/Coordinates.h"
Georgios Pinitas4c5469b2019-05-21 13:32:43 +010028#include "arm_compute/core/QuantizationInfo.h"
Isabella Gottardi6e464c32018-01-26 12:32:45 +000029#include "arm_compute/core/Size2D.h"
Georgios Pinitas8795ffb2017-12-01 16:13:40 +000030#include "arm_compute/core/Strides.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010031#include "arm_compute/core/TensorShape.h"
Georgios Pinitas583137c2017-08-31 18:12:42 +010032#include "support/Half.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010033
Michel Iwaniec5dfeae62017-11-29 10:48:23 +000034#include <cmath>
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035#include <cstddef>
36#include <cstdint>
37#include <string>
38#include <utility>
39
40namespace arm_compute
41{
/** 16-bit floating point type (alias of the half_float library type) */
using half = half_float::half;

/** Permutation vector: target position of each source dimension */
using PermutationVector = Strides;
/** Bidirectional strides (may be negative) */
using BiStrides = Coordinates;
Georgios Pinitas8795ffb2017-12-01 16:13:40 +000049
/** Image colour formats */
enum class Format
{
    UNKNOWN,  /**< Unknown image format */
    U8,       /**< 1 channel, 1 U8 per channel */
    S16,      /**< 1 channel, 1 S16 per channel */
    U16,      /**< 1 channel, 1 U16 per channel */
    S32,      /**< 1 channel, 1 S32 per channel */
    U32,      /**< 1 channel, 1 U32 per channel */
    F16,      /**< 1 channel, 1 F16 per channel */
    F32,      /**< 1 channel, 1 F32 per channel */
    UV88,     /**< 2 channel, 1 U8 per channel */
    RGB888,   /**< 3 channels, 1 U8 per channel */
    RGBA8888, /**< 4 channels, 1 U8 per channel */
    YUV444,   /**< A 3 plane of 8 bit 4:4:4 sampled Y, U, V planes */
    YUYV422,  /**< A single plane of 32-bit macro pixel of Y0, U0, Y1, V0 bytes */
    NV12,     /**< A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling */
    NV21,     /**< A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling */
    IYUV,     /**< A 3 plane of 8-bit 4:2:0 sampled Y, U, V planes */
    UYVY422   /**< A single plane of 32-bit macro pixel of U0, Y0, V0, Y1 bytes */
};
71
/** Available data types */
enum class DataType
{
    UNKNOWN,            /**< Unknown data type */
    U8,                 /**< unsigned 8-bit number */
    S8,                 /**< signed 8-bit number */
    QSYMM8,             /**< quantized, symmetric fixed-point 8-bit number */
    QASYMM8,            /**< quantized, asymmetric fixed-point 8-bit number */
    QSYMM8_PER_CHANNEL, /**< quantized, symmetric per channel fixed-point 8-bit number */
    U16,                /**< unsigned 16-bit number */
    S16,                /**< signed 16-bit number */
    QSYMM16,            /**< quantized, symmetric fixed-point 16-bit number */
    U32,                /**< unsigned 32-bit number */
    S32,                /**< signed 32-bit number */
    U64,                /**< unsigned 64-bit number */
    S64,                /**< signed 64-bit number */
    F16,                /**< 16-bit floating-point number */
    F32,                /**< 32-bit floating-point number */
    F64,                /**< 64-bit floating-point number */
    SIZET               /**< size_t */
};
93
/** Available sampling policies */
enum class SamplingPolicy
{
    CENTER,  /**< Samples are taken at pixel center */
    TOP_LEFT /**< Samples are taken at pixel top left corner */
};
100
/** Constant value of the border pixels when using BorderMode::CONSTANT */
constexpr uint8_t CONSTANT_BORDER_VALUE = 199;

/** Constant value used to indicate a half-scale pyramid */
constexpr float SCALE_PYRAMID_HALF = 0.5f;

/** Constant value used to indicate an ORB scaled pyramid (1 / 2^(1/4)).
 *  Note: 'f' suffix added so the literal is a float and is not silently
 *  narrowed from double. */
constexpr float SCALE_PYRAMID_ORB = 8.408964152537146130583778358414e-01f;
109
/** [DataLayout enum definition] **/

/** Supported tensor data layouts */
enum class DataLayout
{
    UNKNOWN, /**< Unknown data layout */
    NCHW,    /**< Num samples, channels, height, width */
    NHWC     /**< Num samples, height, width, channels */
};
/** [DataLayout enum definition] **/
Georgios Pinitas4074c992018-01-30 18:13:46 +0000120
/** Supported tensor data layout dimensions */
enum class DataLayoutDimension
{
    CHANNEL, /**< channel */
    HEIGHT,  /**< height */
    WIDTH,   /**< width */
    BATCHES  /**< batches */
};
129
/** Available ConvolutionMethod */
enum class ConvolutionMethod
{
    GEMM,     /**< Convolution using GEMM */
    DIRECT,   /**< Direct convolution */
    WINOGRAD, /**< Convolution using Winograd */
    FFT       /**< Convolution using FFT */
};
138
/** Available DeconvolutionMethod */
enum class DeconvolutionMethod
{
    GEMM,   /**< Deconvolution using GEMM */
    DIRECT, /**< Direct deconvolution */
};
145
/** Available FuseBatchNormalizationType */
enum class FuseBatchNormalizationType
{
    CONVOLUTION,         /**< For Convolution weights */
    DEPTHWISECONVOLUTION /**< For Depthwise Convolution weights */
};
152
/** Padding mode to use for PadLayer */
enum class PaddingMode
{
    CONSTANT,  /**< Pad with a constant value */
    REFLECT,   /**< Pad by reflecting the input, excluding the border element */
    SYMMETRIC  /**< Pad by mirroring the input, including the border element */
};
160
/** Supported comparison operations */
enum class ComparisonOperation
{
    Equal,        /**< Equal comparison ( \f$ x == y \f$ ) */
    NotEqual,     /**< NotEqual comparison ( \f$ x != y \f$ ) */
    Greater,      /**< Greater comparison ( \f$ x > y \f$ ) */
    GreaterEqual, /**< Greater equal comparison ( \f$ x >= y \f$ ) */
    Less,         /**< Less comparison ( \f$ x < y \f$ ) */
    LessEqual     /**< Less equal comparison ( \f$ x <= y \f$ ) */
};
171
/** Container for valid region of a window */
struct ValidRegion
{
    /** Default constructor: empty anchor and shape */
    ValidRegion()
        : anchor{}, shape{}
    {
    }

    /** Allow instances of this class to be copy constructed */
    ValidRegion(const ValidRegion &) = default;
    /** Allow instances of this class to be move constructed */
    ValidRegion(ValidRegion &&) = default;
    /** Allow instances of this class to be copied */
    ValidRegion &operator=(const ValidRegion &) = default;
    /** Allow instances of this class to be moved */
    ValidRegion &operator=(ValidRegion &&) = default;
    /** Default destructor */
    ~ValidRegion() = default;

    /** Constructor for a valid region with default number of dimensions
     *
     * @param[in] an_anchor Anchor for the start of the valid region.
     * @param[in] a_shape   Shape of the valid region.
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        // Grow the anchor so anchor and shape agree on the number of dimensions
        anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
    }

    /** Constructor for a valid region with specified number of dimensions
     *
     * @param[in] an_anchor      Anchor for the start of the valid region.
     * @param[in] a_shape        Shape of the valid region.
     * @param[in] num_dimensions Number of dimensions (must be >= number of dimensions of anchor and shape).
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
        anchor.set_num_dimensions(num_dimensions);
    }

    /** Return the start of the valid region for the given dimension @p d */
    int start(unsigned int d) const
    {
        return anchor[d];
    }

    /** Return the end (exclusive) of the valid region for the given dimension @p d */
    int end(unsigned int d) const
    {
        return anchor[d] + shape[d];
    }

    /** Accessor to set the value of anchor and shape for one of the dimensions.
     *
     * @param[in] dimension Dimension for which the value is set.
     * @param[in] start     Value to be set in anchor for the dimension.
     * @param[in] size      Value to be set in shape for the dimension.
     *
     * @return *this.
     */
    ValidRegion &set(size_t dimension, int start, size_t size)
    {
        anchor.set(dimension, start);
        shape.set(dimension, size);
        return *this;
    }

    Coordinates anchor; /**< Anchor for the start of the valid region. */
    TensorShape shape;  /**< Shape of the valid region. */
};
248
/** Methods available to handle borders */
enum class BorderMode
{
    UNDEFINED, /**< Borders are left undefined */
    CONSTANT,  /**< Pixels outside the image are assumed to have a constant value */
    REPLICATE  /**< Pixels outside the image are assumed to have the same value as the closest image pixel */
};
256
/** Container for 2D border size */
struct BorderSize
{
    /** Empty border, i.e. no border */
    constexpr BorderSize()
        : top{ 0 }, right{ 0 }, bottom{ 0 }, left{ 0 }
    {
    }

    /** Border with equal size around the 2D plane */
    explicit constexpr BorderSize(unsigned int size)
        : top{ size }, right{ size }, bottom{ size }, left{ size }
    {
    }

    /** Border with same size for top/bottom and left/right */
    constexpr BorderSize(unsigned int top_bottom, unsigned int left_right)
        : top{ top_bottom }, right{ left_right }, bottom{ top_bottom }, left{ left_right }
    {
    }

    /** Border with different sizes */
    constexpr BorderSize(unsigned int top, unsigned int right, unsigned int bottom, unsigned int left)
        : top{ top }, right{ right }, bottom{ bottom }, left{ left }
    {
    }

    /** Check if the entire border is zero */
    constexpr bool empty() const
    {
        return top == 0 && right == 0 && bottom == 0 && left == 0;
    }

    /** Check if the border is the same size on all sides */
    constexpr bool uniform() const
    {
        return top == right && top == bottom && top == left;
    }

    /** Scale this border size.
     *
     * @note Each side is multiplied by @p scale and truncated back to an
     *       unsigned integer, so fractional results round towards zero.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return *this.
     */
    BorderSize &operator*=(float scale)
    {
        top *= scale;
        right *= scale;
        bottom *= scale;
        left *= scale;

        return *this;
    }

    /** Scale a copy of this border size.
     *
     * @note Declared const (fix) so that a const BorderSize can be scaled;
     *       the original object is never modified.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return a scaled copy of this.
     */
    BorderSize operator*(float scale) const
    {
        BorderSize size = *this;
        size *= scale;

        return size;
    }

    /** Limit this border size to the given maximum, side by side.
     *
     * @param[in] limit Border size to limit this border size to.
     */
    void limit(const BorderSize &limit)
    {
        top    = std::min(top, limit.top);
        right  = std::min(right, limit.right);
        bottom = std::min(bottom, limit.bottom);
        left   = std::min(left, limit.left);
    }

    unsigned int top;    /**< top of the border */
    unsigned int right;  /**< right of the border */
    unsigned int bottom; /**< bottom of the border */
    unsigned int left;   /**< left of the border */
};
343
/** Container for 2D padding size (same layout and semantics as BorderSize) */
using PaddingSize = BorderSize;
346
/** Policy to handle integer overflow */
enum class ConvertPolicy
{
    WRAP,    /**< Wrap around */
    SATURATE /**< Saturate */
};
353
/** Interpolation method */
enum class InterpolationPolicy
{
    NEAREST_NEIGHBOR, /**< Output values are defined to match the source pixel whose center is nearest to the sample position */
    BILINEAR,         /**< Output values are defined by bilinear interpolation between the pixels */
    AREA,             /**< Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image */
};
361
/** Bilinear Interpolation method used by LKTracker */
enum class BilinearInterpolation
{
    BILINEAR_OLD_NEW, /**< Old-new method */
    BILINEAR_SCHARR   /**< Scharr method */
};
368
/** Threshold mode */
enum class ThresholdType
{
    BINARY, /**< Threshold with one value */
    RANGE   /**< Threshold with two values */
};
375
/** Termination criteria */
enum class Termination
{
    TERM_CRITERIA_EPSILON,    /**< Terminate when within epsilon of a threshold */
    TERM_CRITERIA_ITERATIONS, /**< Terminate after a maximum number of iterations */
    TERM_CRITERIA_BOTH        /**< Terminate on whichever of the other conditions occurs first */
};
383
/** Magnitude calculation type. */
enum class MagnitudeType
{
    L1NORM, /**< L1 normalization type */
    L2NORM  /**< L2 normalization type */
};
390
/** Phase calculation type.
 *
 * @note When PhaseType == SIGNED, each angle is mapped to the range 0 to 255 inclusive,
 *       otherwise angles are between 0 and 180.
 */
enum class PhaseType
{
    SIGNED,  /**< Angle range: [0, 360] */
    UNSIGNED /**< Angle range: [0, 180] */
};
400
/** Keypoint type */
struct KeyPoint
{
    int32_t x{ 0 };               /**< X coordinates */
    int32_t y{ 0 };               /**< Y coordinates */
    float   strength{ 0.f };      /**< Strength of the point */
    float   scale{ 0.f };         /**< Scale initialized to 0 by the corner detector */
    float   orientation{ 0.f };   /**< Orientation initialized to 0 by the corner detector */
    int32_t tracking_status{ 0 }; /**< Status set to 1 by the corner detector, set to 0 when the point is lost (defaults to 0 until detection runs) */
    float   error{ 0.f };         /**< Tracking error initialized to 0 by the corner detector */
};
412
/** Internal key point, packed as (x, y, strength) */
using InternalKeypoint = std::tuple<float, float, float>; /* x,y,strength */
415
/** Rectangle type */
struct Rectangle
{
    uint16_t x;      /**< Top-left x coordinate */
    uint16_t y;      /**< Top-left y coordinate */
    uint16_t width;  /**< Width of the rectangle */
    uint16_t height; /**< Height of the rectangle */
};
424
/** 2D coordinate type (signed) */
struct Coordinates2D
{
    int32_t x; /**< X coordinates */
    int32_t y; /**< Y coordinates */
};
431
/** 3D coordinate type (unsigned) */
struct Coordinates3D
{
    uint32_t x; /**< X coordinates */
    uint32_t y; /**< Y coordinates */
    uint32_t z; /**< Z coordinates */
};
439
/** Padding information as a pair of unsigned int start/end */
using PaddingInfo = std::pair<uint32_t, uint32_t>;

/** List of padding information, one entry per dimension */
using PaddingList = std::vector<PaddingInfo>;

/** Information to produce a tiled version of a Tensor (multiples per dimension) */
using Multiples = std::vector<uint32_t>;
448
/** Available channels */
enum class Channel
{
    UNKNOWN, /**< Unknown channel format (fixed: trailing Doxygen marker so the comment binds to this enumerator) */
    C0,      /**< First channel (used by formats with unknown channel types). */
    C1,      /**< Second channel (used by formats with unknown channel types). */
    C2,      /**< Third channel (used by formats with unknown channel types). */
    C3,      /**< Fourth channel (used by formats with unknown channel types). */
    R,       /**< Red channel. */
    G,       /**< Green channel. */
    B,       /**< Blue channel. */
    A,       /**< Alpha channel. */
    Y,       /**< Luma channel. */
    U,       /**< Cb/U channel. */
    V        /**< Cr/V/Value channel. */
};
465
/** Available matrix patterns */
enum class MatrixPattern
{
    BOX,   /**< Box pattern matrix. */
    CROSS, /**< Cross pattern matrix. */
    DISK,  /**< Disk pattern matrix. */
    OTHER  /**< Any other matrix pattern. */
};
474
/** Available non linear functions. */
enum class NonLinearFilterFunction : unsigned
{
    MEDIAN = 0, /**< Non linear median filter. */
    MIN    = 1, /**< Non linear erode. */
    MAX    = 2, /**< Non linear dilate. */
};
482
/** Available reduction operations */
enum class ReductionOperation
{
    ARG_IDX_MAX, /**< Index of the max value */
    ARG_IDX_MIN, /**< Index of the min value */
    MEAN_SUM,    /**< Mean of sum */
    PROD,        /**< Product */
    SUM_SQUARE,  /**< Sum of squares */
    SUM,         /**< Sum */
    MIN,         /**< Min */
    MAX,         /**< Max */
};
495
/** Available element-wise operations */
enum class ArithmeticOperation
{
    ADD,          /**< (x + y) */
    SUB,          /**< (x - y) */
    DIV,          /**< (x / y) */
    MIN,          /**< Min(x, y) */
    MAX,          /**< Max(x, y) */
    SQUARED_DIFF, /**< (x - y)^2 */
    POWER,        /**< x ^ y */
    PRELU,        /**< y*x if x < 0, x otherwise */
};
508
/** Available element wise unary operations */
enum class ElementWiseUnary
{
    RSQRT, /**< Reciprocal square root */
    EXP,   /**< Exponential */
    NEG,   /**< Negate */
    LOG,   /**< Natural Logarithm */
    ABS,   /**< Absolute value */
    SIN,   /**< Sine */
    ROUND, /**< Round */
};
520
/** The normalization type used for the normalization layer */
enum class NormType
{
    IN_MAP_1D, /**< Normalization applied within the same map in 1D region */
    IN_MAP_2D, /**< Normalization applied within the same map in 2D region */
    CROSS_MAP  /**< Normalization applied cross maps */
};
528
/** Normalization type for Histogram of Oriented Gradients (HOG) */
enum class HOGNormType
{
    L2_NORM    = 1, /**< L2-norm */
    L2HYS_NORM = 2, /**< L2-norm followed by clipping */
    L1_NORM    = 3  /**< L1 norm */
};
536
/** Detection window used for the object detection. The detection window keeps the following information:
 *
 * -# Geometry of the rectangular window (x/y of top-left corner and width/height)
 * -# Index of the class used for evaluating which class the detection window belongs to
 * -# Confidence value (score) obtained with the classifier
 */
struct DetectionWindow
{
    uint16_t x{ 0 };         /**< Top-left x coordinate */
    uint16_t y{ 0 };         /**< Top-left y coordinate */
    uint16_t width{ 0 };     /**< Width of the detection window */
    uint16_t height{ 0 };    /**< Height of the detection window */
    uint16_t idx_class{ 0 }; /**< Index of the class */
    float    score{ 0.f };   /**< Confidence value for the detection window */
};
552
/** Dimension rounding type when down-scaling on CNNs
 * @note Used in pooling and convolution layer
 */
enum class DimensionRoundingType
{
    FLOOR, /**< Floor rounding */
    CEIL   /**< Ceil rounding */
};
561
/** Available pooling types */
enum class PoolingType
{
    MAX, /**< Max Pooling */
    AVG, /**< Average Pooling */
    L2   /**< L2 Pooling */
};
569
/** Available non maxima suppression types */
enum class NMSType
{
    LINEAR,   /**< Linear NMS */
    GAUSSIAN, /**< Gaussian NMS */
    ORIGINAL  /**< Original NMS */
};
577
/** BoxWithNonMaximaSuppressionLimit Information class */
class BoxNMSLimitInfo final
{
public:
    /** Constructor
     *
     * @param[in] score_thresh             (Optional) Score threshold.
     * @param[in] nms                      (Optional) NMS value
     * @param[in] detections               (Optional) Number of detections
     * @param[in] soft_nms_enabled         (Optional) Enable SoftNMS
     * @param[in] soft_nms_method          (Optional) Soft NMS method
     * @param[in] soft_nms_sigma           (Optional) Soft NMS sigma value
     * @param[in] soft_nms_min_score_thres (Optional) Soft NMS minimum score threshold
     * @param[in] suppress_size            (Optional) Filter out boxes based on their size. Defaults to false
     * @param[in] min_size                 (Optional) Smaller boxes than min_size will be filtered out. Defaults to 1
     * @param[in] im_width                 (Optional) Boxes whose centers (on the x axis) is beyond im_width will be filtered. Defaults to 1
     * @param[in] im_height                (Optional) Boxes whose centers (on the y axis) is beyond im_height will be filtered. Defaults to 1
     */
    BoxNMSLimitInfo(float score_thresh = 0.05f, float nms = 0.3f,
                    int detections = 100, bool soft_nms_enabled = false,
                    NMSType soft_nms_method = NMSType::LINEAR,
                    float soft_nms_sigma = 0.5f, float soft_nms_min_score_thres = 0.001f, bool suppress_size = false, float min_size = 1.0f, float im_width = 1.0f, float im_height = 1.0f)
        : _score_thresh(score_thresh), _nms(nms), _detections_per_im(detections), _soft_nms_enabled(soft_nms_enabled), _soft_nms_method(soft_nms_method), _soft_nms_sigma(soft_nms_sigma),
          _soft_nms_min_score_thres(soft_nms_min_score_thres), _suppress_size(suppress_size), _min_size(min_size), _im_width(im_width), _im_height(im_height)
    {
    }
    /** Get the score threshold */
    float score_thresh() const
    {
        return _score_thresh;
    }
    /** Get the NMS */
    float nms() const
    {
        return _nms;
    }
    /** Get the number of detections */
    int detections_per_im() const
    {
        return _detections_per_im;
    }
    /** Check if soft NMS is enabled */
    bool soft_nms_enabled() const
    {
        return _soft_nms_enabled;
    }
    /** Get soft NMS method */
    NMSType soft_nms_method() const
    {
        return _soft_nms_method;
    }
    /** Get soft NMS sigma */
    float soft_nms_sigma() const
    {
        return _soft_nms_sigma;
    }
    /** Get soft nms min score threshold */
    float soft_nms_min_score_thres() const
    {
        return _soft_nms_min_score_thres;
    }
    /** Get if NMS will suppress boxes based on their size/position */
    bool suppress_size() const
    {
        return _suppress_size;
    }
    /** Get size suppression threshold */
    float min_size() const
    {
        return _min_size;
    }
    /** Get image width (NMS may suppress boxes whose center sits beyond the image width) */
    float im_width() const
    {
        return _im_width;
    }
    /** Get image height (NMS may suppress boxes whose center sits beyond the image height) */
    float im_height() const
    {
        return _im_height;
    }

private:
    float   _score_thresh;             /**< Score threshold */
    float   _nms;                      /**< NMS value */
    int     _detections_per_im;        /**< Number of detections per image */
    bool    _soft_nms_enabled;         /**< Soft NMS enabled flag */
    NMSType _soft_nms_method;          /**< Soft NMS method */
    float   _soft_nms_sigma;           /**< Soft NMS sigma */
    float   _soft_nms_min_score_thres; /**< Soft NMS minimum score threshold */
    bool    _suppress_size;            /**< Suppress boxes by size/position */
    float   _min_size;                 /**< Minimum box size */
    float   _im_width;                 /**< Image width */
    float   _im_height;                /**< Image height */
};
673
/** Padding and stride information class */
class PadStrideInfo
{
public:
    /** Constructor
     *
     * @param[in] stride_x (Optional) Stride, in elements, across x. Defaults to 1.
     * @param[in] stride_y (Optional) Stride, in elements, across y. Defaults to 1.
     * @param[in] pad_x    (Optional) Padding, in elements, across x. Defaults to 0.
     * @param[in] pad_y    (Optional) Padding, in elements, across y. Defaults to 0.
     * @param[in] round    (Optional) Dimensions rounding. Defaults to @ref FLOOR.
     */
    PadStrideInfo(unsigned int stride_x = 1, unsigned int stride_y = 1,
                  unsigned int pad_x = 0, unsigned int pad_y = 0,
                  DimensionRoundingType round = DimensionRoundingType::FLOOR)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_x),
          _pad_top(pad_y),
          _pad_right(pad_x),
          _pad_bottom(pad_y),
          _round_type(round)
    {
    }
    /** Constructor
     *
     * @note Parameter documentation reordered (fix) to match the actual signature:
     *       pad_left, pad_right, pad_top, pad_bottom.
     *
     * @param[in] stride_x   Stride, in elements, across x.
     * @param[in] stride_y   Stride, in elements, across y.
     * @param[in] pad_left   Padding across x on the left, in elements.
     * @param[in] pad_right  Padding across x on the right, in elements.
     * @param[in] pad_top    Padding across y on the top, in elements.
     * @param[in] pad_bottom Padding across y on the bottom, in elements.
     * @param[in] round      Dimensions rounding.
     */
    PadStrideInfo(unsigned int stride_x, unsigned int stride_y,
                  unsigned int pad_left, unsigned int pad_right,
                  unsigned int pad_top, unsigned int pad_bottom,
                  DimensionRoundingType round)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_left),
          _pad_top(pad_top),
          _pad_right(pad_right),
          _pad_bottom(pad_bottom),
          _round_type(round)
    {
    }
    /** Get the stride.
     *
     * @return a pair: stride x, stride y.
     */
    std::pair<unsigned int, unsigned int> stride() const
    {
        return _stride;
    }
    /** Check whether the padding is symmetric.
     *
     * @return True if the padding is symmetric.
     */
    bool padding_is_symmetric() const
    {
        return (_pad_left == _pad_right) && (_pad_top == _pad_bottom);
    }
    /** Get the padding.
     *
     * @note This should only be used when the padding is symmetric.
     *
     * @return a pair: padding left/right, padding top/bottom
     */
    std::pair<unsigned int, unsigned int> pad() const
    {
        // This accessor should be used only when padding is symmetric
        ARM_COMPUTE_ERROR_ON(!padding_is_symmetric());
        return std::make_pair(_pad_left, _pad_top);
    }

    /** Get the left padding */
    unsigned int pad_left() const
    {
        return _pad_left;
    }
    /** Get the right padding */
    unsigned int pad_right() const
    {
        return _pad_right;
    }
    /** Get the top padding */
    unsigned int pad_top() const
    {
        return _pad_top;
    }
    /** Get the bottom padding */
    unsigned int pad_bottom() const
    {
        return _pad_bottom;
    }

    /** Get the rounding type */
    DimensionRoundingType round() const
    {
        return _round_type;
    }

    /** Check whether this has any padding */
    bool has_padding() const
    {
        return (_pad_left != 0 || _pad_top != 0 || _pad_right != 0 || _pad_bottom != 0);
    }

private:
    std::pair<unsigned int, unsigned int> _stride;     /**< Stride x/y, in elements */
    unsigned int                          _pad_left;   /**< Padding across x on the left, in elements */
    unsigned int                          _pad_top;    /**< Padding across y on the top, in elements */
    unsigned int                          _pad_right;  /**< Padding across x on the right, in elements */
    unsigned int                          _pad_bottom; /**< Padding across y on the bottom, in elements */

    DimensionRoundingType _round_type; /**< Rounding type used when down-scaling */
};
790
/** Fully connected layer info
 *
 * Plain aggregate of options consumed by the fully connected layer;
 * the setters return *this so options can be chained fluently.
 */
struct FullyConnectedLayerInfo
{
    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
    bool transpose_weights{ true };                        /**< Transpose weights if true. */
    bool are_weights_reshaped{ false };                    /**< Reshape the weights tensor if false. */
    bool retain_internal_weights{ false };                 /**< Retain internal reshaped weights. */

    /** Sets the weights trained data layout
     *
     * @param[in] layout Data layout that the weights were trained with
     *
     * @return Updated object (allows chaining)
     */
    FullyConnectedLayerInfo &set_weights_trained_layout(DataLayout layout)
    {
        weights_trained_layout = layout;
        return *this;
    }
    /** Sets the transpose weights flag
     *
     * @param[in] should_transpose_weights Boolean flag indicating if weights should be transposed
     *
     * @return Updated object (allows chaining)
     */
    FullyConnectedLayerInfo &set_transpose_weights(bool should_transpose_weights)
    {
        transpose_weights = should_transpose_weights;
        return *this;
    }
};
822
Michalis Spyrou6c7c38e2018-08-29 16:28:11 +0100823/** PriorBox layer info */
824class PriorBoxLayerInfo final
825{
826public:
827 /** Default Constructor */
828 PriorBoxLayerInfo()
829 : _min_sizes(),
830 _variances(),
831 _offset(),
832 _flip(true),
833 _clip(false),
834 _max_sizes(),
835 _aspect_ratios(),
836 _img_size(),
837 _steps()
838 {
839 }
840 /** Constructor
841 *
842 * @param[in] min_sizes Min sizes vector.
Michalis Spyrou721c4cb2018-09-04 15:27:25 +0100843 * @param[in] variances Variances vector.
Michalis Spyrou6c7c38e2018-08-29 16:28:11 +0100844 * @param[in] offset Offset value.
845 * @param[in] flip (Optional) Flip the aspect ratios.
846 * @param[in] clip (Optional) Clip coordinates so that they're within [0,1].
847 * @param[in] max_sizes (Optional) Max sizes vector.
848 * @param[in] aspect_ratios (Optional) Aspect ratios of the boxes.
849 * @param[in] img_size (Optional) Image size.
850 * @param[in] steps (Optional) Step values.
851 */
852 PriorBoxLayerInfo(const std::vector<float> &min_sizes, const std::vector<float> &variances, float offset, bool flip = true, bool clip = false,
Pablo Tello32521432018-11-15 14:43:10 +0000853 const std::vector<float> &max_sizes = {}, const std::vector<float> &aspect_ratios = {},
854 const Coordinates2D &img_size = Coordinates2D{ 0, 0 }, const std::array<float, 2> &steps = { { 0.f, 0.f } })
Michalis Spyrou6c7c38e2018-08-29 16:28:11 +0100855 : _min_sizes(min_sizes),
856 _variances(variances),
857 _offset(offset),
858 _flip(flip),
859 _clip(clip),
860 _max_sizes(max_sizes),
Michalis Spyrou721c4cb2018-09-04 15:27:25 +0100861 _aspect_ratios(),
Michalis Spyrou6c7c38e2018-08-29 16:28:11 +0100862 _img_size(img_size),
863 _steps(steps)
864 {
865 _aspect_ratios.push_back(1.);
866 for(unsigned int i = 0; i < aspect_ratios.size(); ++i)
867 {
868 float ar = aspect_ratios[i];
869 bool already_exist = false;
870 for(auto ar_new : _aspect_ratios)
871 {
872 if(fabs(ar - ar_new) < 1e-6)
873 {
874 already_exist = true;
875 break;
876 }
877 }
878 if(!already_exist)
879 {
880 _aspect_ratios.push_back(ar);
881 if(flip)
882 {
883 _aspect_ratios.push_back(1.f / ar);
884 }
885 }
886 }
887 }
888 /** Get min sizes. */
889 std::vector<float> min_sizes() const
890 {
891 return _min_sizes;
892 }
893 /** Get min variances. */
894 std::vector<float> variances() const
895 {
896 return _variances;
897 }
898 /** Get the step coordinates */
899 std::array<float, 2> steps() const
900 {
901 return _steps;
902 }
903 /** Get the image size coordinates */
904 Coordinates2D img_size() const
905 {
906 return _img_size;
907 }
908 /** Get the offset */
909 float offset() const
910 {
911 return _offset;
912 }
913 /** Get the flip value */
914 bool flip() const
915 {
916 return _flip;
917 }
918 /** Get the clip value */
919 bool clip() const
920 {
921 return _clip;
922 }
923 /** Get max sizes. */
924 std::vector<float> max_sizes() const
925 {
926 return _max_sizes;
927 }
928 /** Get aspect ratios. */
929 std::vector<float> aspect_ratios() const
930 {
931 return _aspect_ratios;
932 }
933
934private:
935 std::vector<float> _min_sizes;
936 std::vector<float> _variances;
937 float _offset;
938 bool _flip;
939 bool _clip;
940 std::vector<float> _max_sizes;
941 std::vector<float> _aspect_ratios;
942 Coordinates2D _img_size;
943 std::array<float, 2> _steps;
944};
945
/** Available Detection Output code types */
enum class DetectionOutputLayerCodeType
{
    CORNER,      /**< Use box corners */
    CENTER_SIZE, /**< Use box centers and size */
    CORNER_SIZE, /**< Use box corners and size */
    TF_CENTER    /**< Use box centers and size but flip x and y co-ordinates */
};
954
955/** Detection Output layer info */
956class DetectionOutputLayerInfo final
957{
958public:
959 /** Default Constructor */
960 DetectionOutputLayerInfo()
961 : _num_classes(),
962 _share_location(),
963 _code_type(DetectionOutputLayerCodeType::CORNER),
964 _keep_top_k(),
965 _nms_threshold(),
966 _top_k(),
967 _background_label_id(),
968 _confidence_threshold(),
969 _variance_encoded_in_target(false),
970 _eta(),
971 _num_loc_classes()
972 {
973 _num_loc_classes = _share_location ? 1 : _num_classes;
974 }
975 /** Constructor
976 *
977 * @param[in] num_classes Number of classes to be predicted.
978 * @param[in] share_location If true, bounding box are shared among different classes.
979 * @param[in] code_type Type of coding method for bbox.
980 * @param[in] keep_top_k Number of total bounding boxes to be kept per image after NMS step.
981 * @param[in] nms_threshold Threshold to be used in NMS.
982 * @param[in] top_k (Optional) Number of boxes per image with top confidence scores that are fed into the NMS algorithm. Default set to -1.
983 * @param[in] background_label_id (Optional) Background label ID. If there is no background class, set it as -1.
984 * @param[in] confidence_threshold (Optional) Only consider detections whose confidences are larger than a threshold. Default set to -FLT_MAX.
985 * @param[in] variance_encoded_in_target (Optional) If true, variance is encoded in target. Otherwise we need to adjust the predicted offset accordingly.Default set to false.
986 * @param[in] eta (Optional) Eta.
987 */
988 DetectionOutputLayerInfo(int num_classes, bool share_location, DetectionOutputLayerCodeType code_type, int keep_top_k, float nms_threshold, int top_k = -1, int background_label_id = -1,
989 float confidence_threshold = std::numeric_limits<float>::lowest(), bool variance_encoded_in_target = false, float eta = 1)
990 : _num_classes(num_classes),
991 _share_location(share_location),
992 _code_type(code_type),
993 _keep_top_k(keep_top_k),
994 _nms_threshold(nms_threshold),
995 _top_k(top_k),
996 _background_label_id(background_label_id),
997 _confidence_threshold(confidence_threshold),
998 _variance_encoded_in_target(variance_encoded_in_target),
999 _eta(eta),
1000 _num_loc_classes()
1001 {
1002 _num_loc_classes = _share_location ? 1 : _num_classes;
1003 }
1004 /** Get num classes. */
1005 int num_classes() const
1006 {
1007 return _num_classes;
1008 }
1009 /** Get share location. */
1010 bool share_location() const
1011 {
1012 return _share_location;
1013 }
1014 /** Get detection output code type. */
1015 DetectionOutputLayerCodeType code_type() const
1016 {
1017 return _code_type;
1018 }
1019 /** Get if variance encoded in target. */
1020 bool variance_encoded_in_target() const
1021 {
1022 return _variance_encoded_in_target;
1023 }
1024 /** Get the number of total bounding boxes to be kept per image. */
1025 int keep_top_k() const
1026 {
1027 return _keep_top_k;
1028 }
1029 /** Get nms threshold. */
1030 float nms_threshold() const
1031 {
1032 return _nms_threshold;
1033 }
1034 /** Get eta. */
1035 float eta() const
1036 {
1037 return _eta;
1038 }
1039 /** Get background label ID. */
1040 int background_label_id() const
1041 {
1042 return _background_label_id;
1043 }
1044 /** Get confidence threshold. */
1045 float confidence_threshold() const
1046 {
1047 return _confidence_threshold;
1048 }
1049 /** Get top K. */
1050 int top_k() const
1051 {
1052 return _top_k;
1053 }
1054 /** Get number of location classes. */
1055 int num_loc_classes() const
1056 {
1057 return _num_loc_classes;
1058 }
1059
1060private:
1061 int _num_classes;
1062 bool _share_location;
1063 DetectionOutputLayerCodeType _code_type;
1064 int _keep_top_k;
1065 float _nms_threshold;
1066 int _top_k;
1067 int _background_label_id;
1068 float _confidence_threshold;
1069 bool _variance_encoded_in_target;
1070 float _eta;
1071 int _num_loc_classes;
1072};
1073
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001074/** Pooling Layer Information class */
1075class PoolingLayerInfo
1076{
1077public:
Georgios Pinitas4c2dd542017-11-13 12:58:41 +00001078 /** Default Constructor */
1079 PoolingLayerInfo()
Isabella Gottardi6e464c32018-01-26 12:32:45 +00001080 : _pool_type(PoolingType::MAX), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
Georgios Pinitas4c2dd542017-11-13 12:58:41 +00001081 {
1082 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001083 /** Default Constructor
1084 *
Georgios Pinitas4c2dd542017-11-13 12:58:41 +00001085 * @param[in] pool_type Pooling type @ref PoolingType.
1086 * @param[in] pool_size Pooling size, in elements, across x and y.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001087 * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
Georgios Pinitasadaae7e2017-10-30 15:56:32 +00001088 * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
1089 * True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
1090 * Defaults to false;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001091 */
Georgios Pinitas4c2dd542017-11-13 12:58:41 +00001092 explicit PoolingLayerInfo(PoolingType pool_type,
1093 unsigned int pool_size,
1094 PadStrideInfo pad_stride_info = PadStrideInfo(),
1095 bool exclude_padding = false)
Isabella Gottardi6e464c32018-01-26 12:32:45 +00001096 : _pool_type(pool_type), _pool_size(Size2D(pool_size, pool_size)), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
1097 {
1098 }
1099 /** Default Constructor
1100 *
1101 * @param[in] pool_type Pooling type @ref PoolingType.
1102 * @param[in] pool_size Pooling size, in elements, across x and y.
1103 * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
1104 * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
1105 * True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
1106 * Defaults to false;
1107 */
1108 explicit PoolingLayerInfo(PoolingType pool_type,
1109 Size2D pool_size,
1110 PadStrideInfo pad_stride_info = PadStrideInfo(),
1111 bool exclude_padding = false)
Georgios Pinitas4c2dd542017-11-13 12:58:41 +00001112 : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
1113 {
1114 }
1115 /** Default Constructor
1116 *
1117 * @note This constructor is used for global pooling
1118 *
1119 * @param[in] pool_type Pooling type @ref PoolingType.
1120 */
1121 explicit PoolingLayerInfo(PoolingType pool_type)
Isabella Gottardi6e464c32018-01-26 12:32:45 +00001122 : _pool_type(pool_type), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001123 {
1124 }
Alex Gildayc357c472018-03-21 13:54:09 +00001125 /** Get the pooling type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001126 PoolingType pool_type() const
1127 {
1128 return _pool_type;
1129 }
Alex Gildayc357c472018-03-21 13:54:09 +00001130 /** Get the pooling size */
Isabella Gottardi6e464c32018-01-26 12:32:45 +00001131 const Size2D &pool_size() const
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001132 {
1133 return _pool_size;
1134 }
Alex Gildayc357c472018-03-21 13:54:09 +00001135 /** Get the padding and stride */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001136 PadStrideInfo pad_stride_info() const
1137 {
1138 return _pad_stride_info;
1139 }
Alex Gildayc357c472018-03-21 13:54:09 +00001140 /** Check if padding is excluded in calculations */
Georgios Pinitasadaae7e2017-10-30 15:56:32 +00001141 bool exclude_padding() const
1142 {
1143 return _exclude_padding;
1144 }
Alex Gildayc357c472018-03-21 13:54:09 +00001145 /** Check if is global pooling */
Georgios Pinitas4c2dd542017-11-13 12:58:41 +00001146 bool is_global_pooling() const
1147 {
1148 return _is_global_pooling;
1149 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001150
1151private:
1152 PoolingType _pool_type;
Isabella Gottardi6e464c32018-01-26 12:32:45 +00001153 Size2D _pool_size;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001154 PadStrideInfo _pad_stride_info;
Georgios Pinitasadaae7e2017-10-30 15:56:32 +00001155 bool _exclude_padding;
Georgios Pinitas4c2dd542017-11-13 12:58:41 +00001156 bool _is_global_pooling;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001157};
1158
/** ROI Pooling Layer Information class */
class ROIPoolingLayerInfo final
{
public:
    /** Constructor
     *
     * @param[in] pooled_width   Pooled width of the layer.
     * @param[in] pooled_height  Pooled height of the layer.
     * @param[in] spatial_scale  Spatial scale to be applied to the ROI coordinates and dimensions.
     * @param[in] sampling_ratio (Optional) Number of samples to include in each pooling region
     *                           (if set to zero, a ceil(roi_dims/pooling_dims))
     */
    ROIPoolingLayerInfo(unsigned int pooled_width, unsigned int pooled_height, float spatial_scale, unsigned int sampling_ratio = 0)
        : _pooled_width(pooled_width),
          _pooled_height(pooled_height),
          _spatial_scale(spatial_scale),
          _sampling_ratio(sampling_ratio)
    {
    }
    /** Get the pooled width of the layer */
    unsigned int pooled_width() const
    {
        return _pooled_width;
    }
    /** Get the pooled height of the layer */
    unsigned int pooled_height() const
    {
        return _pooled_height;
    }
    /** Get the spatial scale */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get the sampling ratio (0 means an adaptive number of samples) */
    unsigned int sampling_ratio() const
    {
        return _sampling_ratio;
    }

private:
    unsigned int _pooled_width;
    unsigned int _pooled_height;
    float _spatial_scale;
    unsigned int _sampling_ratio;
};
1201
/** Generate Proposals Information class */
class GenerateProposalsInfo
{
public:
    /** Constructor
     *
     * @param[in] im_width       Width of the original image
     * @param[in] im_height      Height of the original image
     * @param[in] im_scale       Scale applied to the original image
     * @param[in] spatial_scale  (Optional) Scale applied to the feature map. Defaults to 1.0
     * @param[in] pre_nms_topN   (Optional) Number of the best scores to be selected from the transformations. Defaults to 6000.
     * @param[in] post_nms_topN  (Optional) Number of the best scores to be selected from the NMS operation. Defaults to 300.
     * @param[in] nms_thres      (Optional) NMS overlap threshold. Defaults to 0.7.
     * @param[in] min_size       (Optional) Size used to validate the anchors produced. Defaults to 16.
     * @param[in] values_per_roi (Optional) Values used to represent a ROI(Region of interest). Defaults to 4.
     */
    GenerateProposalsInfo(float im_width, float im_height, float im_scale, float spatial_scale = 1.0, int pre_nms_topN = 6000, int post_nms_topN = 300, float nms_thres = 0.7, float min_size = 16.0,
                          size_t values_per_roi = 4)
        : _im_height(im_height),
          _im_width(im_width),
          _im_scale(im_scale),
          _spatial_scale(spatial_scale),
          _pre_nms_topN(pre_nms_topN),
          _post_nms_topN(post_nms_topN),
          _nms_thres(nms_thres),
          _min_size(min_size),
          _values_per_roi(values_per_roi)
    {
    }

    /** Get the original image height */
    float im_height() const
    {
        return _im_height;
    }
    /** Get the original image width */
    float im_width() const
    {
        return _im_width;
    }
    /** Get the image scale */
    float im_scale() const
    {
        return _im_scale;
    }
    /** Get how many best scores to select before NMS */
    int pre_nms_topN() const
    {
        return _pre_nms_topN;
    }
    /** Get how many best scores to select after NMS */
    int post_nms_topN() const
    {
        return _post_nms_topN;
    }
    /** Get the NMS overlap threshold */
    float nms_thres() const
    {
        return _nms_thres;
    }
    /** Get the minimal size */
    float min_size() const
    {
        return _min_size;
    }
    /** Get the spatial scale to be applied to the feature maps */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get the values used to represent a ROI(Region of interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float _im_height;
    float _im_width;
    float _im_scale;
    float _spatial_scale;
    int _pre_nms_topN;
    int _post_nms_topN;
    float _nms_thres;
    float _min_size;
    size_t _values_per_roi;
};
1282
/** ComputeAnchors information class */
class ComputeAnchorsInfo
{
public:
    /** Constructor
     *
     * @param[in] feat_width     Feature map width
     * @param[in] feat_height    Feature map height
     * @param[in] spatial_scale  Feature map scale
     * @param[in] values_per_roi (Optional) Values used to represent a ROI(Region Of Interest). Defaults to 4
     */
    ComputeAnchorsInfo(float feat_width, float feat_height, float spatial_scale, size_t values_per_roi = 4)
        : _feat_height(feat_height), _feat_width(feat_width), _spatial_scale(spatial_scale), _values_per_roi(values_per_roi)
    {
    }

    /** Get the height of the feature map */
    float feat_height() const
    {
        return _feat_height;
    }
    /** Get the width of the feature map */
    float feat_width() const
    {
        return _feat_width;
    }
    /** Get the scale of the feature map */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get the values used to represent a ROI(Region Of Interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float _feat_height;
    float _feat_width;
    float _spatial_scale;
    size_t _values_per_roi;
};
1332
/** Bounding Box Transform information class */
class BoundingBoxTransformInfo final
{
public:
    /** Constructor
     *
     * @param[in] img_width                Width of the original image
     * @param[in] img_height               Height of the original image
     * @param[in] scale                    Scale of the original image
     * @param[in] apply_scale              (Optional) Re-apply scaling after transforming the boxes. Defaults to false
     * @param[in] weights                  (Optional) Weights [wx, wy, ww, wh] for the deltas. Defaults to all ones
     * @param[in] correct_transform_coords (Optional) Correct bounding box transform coordinates. Defaults to false
     * @param[in] bbox_xform_clip          (Optional) Minimum bounding box width and height after bounding box transformation
     *                                     in log-space. Defaults to log(1000/16)
     */
    BoundingBoxTransformInfo(float img_width, float img_height, float scale, bool apply_scale = false, const std::array<float, 4> weights = { { 1.f, 1.f, 1.f, 1.f } },
                             bool correct_transform_coords = false, float bbox_xform_clip = 4.135166556742356f)
        : _img_width(img_width),
          _img_height(img_height),
          _scale(scale),
          _apply_scale(apply_scale),
          _correct_transform_coords(correct_transform_coords),
          _weights(weights),
          _bbox_xform_clip(bbox_xform_clip)
    {
    }

    /** Get the deltas weights [wx, wy, ww, wh] */
    std::array<float, 4> weights() const
    {
        return _weights;
    }
    /** Get the log-space clip for box width/height */
    float bbox_xform_clip() const
    {
        return _bbox_xform_clip;
    }
    /** Get the original image height */
    float img_height() const
    {
        return _img_height;
    }
    /** Get the original image width */
    float img_width() const
    {
        return _img_width;
    }
    /** Get the image scale */
    float scale() const
    {
        return _scale;
    }
    /** Check whether scaling is re-applied after the transform */
    bool apply_scale() const
    {
        return _apply_scale;
    }
    /** Check whether transform coordinates are corrected */
    bool correct_transform_coords() const
    {
        return _correct_transform_coords;
    }

private:
    float _img_width;
    float _img_height;
    float _scale;
    bool _apply_scale;
    bool _correct_transform_coords;
    std::array<float, 4> _weights;
    float _bbox_xform_clip;
};
1399
/** Activation Layer Information class */
class ActivationLayerInfo
{
public:
    /** Available activation functions */
    enum class ActivationFunction
    {
        LOGISTIC,        /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
        TANH,            /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
        RELU,            /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
        BOUNDED_RELU,    /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
        LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
        LEAKY_RELU,      /**< Leaky Rectifier ( \f$ f(x) = \begin{cases}  \alpha x & \quad \text{if } x \text{ < 0}\\  x & \quad \text{if } x \geq \text{ 0 } \end{cases} \f$ ) */
        SOFT_RELU,       /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
        ABS,             /**< Absolute ( \f$ f(x)= |x| \f$ ) */
        SQUARE,          /**< Square ( \f$ f(x)= x^2 \f$ )*/
        SQRT,            /**< Square root ( \f$ f(x) = \sqrt{x} \f$ )*/
        LINEAR,          /**< Linear ( \f$ f(x)= ax + b \f$ ) */
        IDENTITY         /**< Identity ( \f$ f(x)= x \f$ ) */
    };

    /** Default constructor: disabled activation (enabled() returns false) */
    ActivationLayerInfo() = default;
    /** Constructor
     *
     * @param[in] f The activation function to use.
     * @param[in] a (Optional) The alpha parameter used by some activation functions
     *              (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
     * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
     */
    ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
        : _act(f), _a(a), _b(b), _enabled(true)
    {
    }
    /** Get the type of activation function */
    ActivationFunction activation() const
    {
        return _act;
    }
    /** Get the alpha value */
    float a() const
    {
        return _a;
    }
    /** Get the beta value */
    float b() const
    {
        return _b;
    }
    /** Check if the activation information was explicitly set */
    bool enabled() const
    {
        return _enabled;
    }

private:
    ActivationFunction _act = { ActivationFunction::IDENTITY };
    float _a = {};
    float _b = {};
    bool _enabled = { false };
};
1460
1461/** Normalization Layer Information class */
1462class NormalizationLayerInfo
1463{
1464public:
1465 /** Default Constructor
1466 *
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001467 * @param[in] type The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NormType::CROSS_MAP
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001468 * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5.
Georgios Pinitas41caa622017-11-16 14:37:08 +00001469 * @param[in] alpha (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001.
1470 * @param[in] beta (Optional) Beta parameter used by normalization equation. Defaults to 0.5.
1471 * @param[in] kappa (Optional) Kappa parameter used by [Krichevksy 2012] Across Channel Local Brightness Normalization equation.
1472 * @param[in] is_scaled (Optional) Boolean that specifies if alpha will be scaled by the normalization size or not.
1473 * Should be false to follow [Krichevksy 2012].
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001474 */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001475 NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true)
1476 : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001477 {
1478 }
Alex Gildayc357c472018-03-21 13:54:09 +00001479 /** Get the normalization type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001480 NormType type() const
1481 {
1482 return _type;
1483 }
Alex Gildayc357c472018-03-21 13:54:09 +00001484 /** Get the normalization size */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001485 uint32_t norm_size() const
1486 {
1487 return _norm_size;
1488 }
Alex Gildayc357c472018-03-21 13:54:09 +00001489 /** Get the alpha value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001490 float alpha() const
1491 {
1492 return _alpha;
1493 }
Alex Gildayc357c472018-03-21 13:54:09 +00001494 /** Get the beta value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001495 float beta() const
1496 {
1497 return _beta;
1498 }
Alex Gildayc357c472018-03-21 13:54:09 +00001499 /** Get the kappa value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001500 float kappa() const
1501 {
1502 return _kappa;
1503 }
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001504 /** Get the is_scaled value */
1505 bool is_scaled() const
1506 {
1507 return _is_scaled;
1508 }
Alex Gildayc357c472018-03-21 13:54:09 +00001509 /** Check if normalization is cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001510 bool is_cross_map() const
1511 {
1512 return _type == NormType::CROSS_MAP;
1513 }
Alex Gildayc357c472018-03-21 13:54:09 +00001514 /** Check if normalization is not cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001515 bool is_in_map() const
1516 {
1517 return !is_cross_map();
1518 }
1519 /** Return the scaling factor of the normalization function.
1520 *
1521 * If is_scaled is set to false then [Krichevksy 2012] normalization scaling is performed,
1522 * where alpha is returned plainly, else alpha is scaled by the total number of elements used for the normalization.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001523 *
1524 * @return The normalization scaling factor.
1525 */
1526 float scale_coeff() const
1527 {
1528 const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001529 return (_is_scaled) ? (_alpha / size) : _alpha;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001530 }
1531
1532private:
1533 NormType _type;
1534 uint32_t _norm_size;
1535 float _alpha;
1536 float _beta;
1537 float _kappa;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001538 bool _is_scaled;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001539};
1540
/** Convolution Layer Weights Information class. This class stores the necessary information to compute convolution layer when the weights are already reshaped */
class WeightsInfo
{
public:
    /** Default constructor */
    WeightsInfo()
        : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
    {
    }
    /** Constructor
     *
     * @param[in] are_reshaped            True if the weights have been reshaped
     * @param[in] kernel_width            Kernel width.
     * @param[in] kernel_height           Kernel height.
     * @param[in] num_kernels             Number of convolution kernels.
     * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
     */
    WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
        : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
    {
    }
    /** Flag which specifies if the weights tensor has been reshaped.
     *
     * @return True if the weights tensor has been reshaped
     */
    bool are_reshaped() const
    {
        return _are_reshaped;
    }
    /** Return the number of convolution kernels
     *
     * @return The number of convolution kernels
     */
    unsigned int num_kernels() const
    {
        return _num_kernels;
    }
    /** Return the width and height of the kernel
     *
     * @return The width and height of the kernel
     */
    std::pair<unsigned int, unsigned int> kernel_size() const
    {
        return { _kernel_width, _kernel_height };
    }
    /** Flag which specifies whether the internal reshaped weights must be retained (e.g. for reconfiguration)
     *
     * @return True if the internal reshaped weights must be retained
     */
    bool retain_internal_weights() const
    {
        return _retain_internal_weights;
    }

private:
    const bool _are_reshaped;
    const unsigned int _kernel_width;
    const unsigned int _kernel_height;
    const unsigned int _num_kernels;
    const bool _retain_internal_weights;
};
1598
/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
 *
 * The matrix A can only be reshaped through @ref CLGEMMReshapeLHSMatrixKernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
 * Note: Optionally just for @ref CLGEMMReshapeLHSMatrixKernel is it possible to set mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block
 *
 * The matrix B can only be reshaped through @ref CLGEMMReshapeRHSMatrixKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
 * Note: Optionally just for @ref CLGEMMReshapeRHSMatrixKernel is it possible to set mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block
 *
 */
class GEMMReshapeInfo final
{
public:
    /** Default constructor */
    GEMMReshapeInfo()
        : _m(1), _n(1), _k(1), _mult_transpose1xW_width(1), _mult_interleave4x4_height(1), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false), _broadcast_bias(false)
    {
    }
    /** Constructor
     *
     * @param[in] m                         Number of matrix A rows
     * @param[in] n                         Number of matrix B columns
     * @param[in] k                         Number of matrix A columns or matrix B rows
     * @param[in] mult_transpose1xW_width   (Optional) Multiplication factor for the width of the 1xW transposed block
     * @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleaved block
     * @param[in] depth_output_gemm3d       (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel.
     *                                      If 0 the output will not be reinterpreted as 3D. Default 0
     * @param[in] reinterpret_input_as_3d   (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
     *                                      to perform 1x1 convolutions with the NHWC data layout)
     * @param[in] broadcast_bias            (Optional) Broadcast the shape of the bias tensor from a vector to a matrix.
     */
    GEMMReshapeInfo(int m, int n, int k, int mult_transpose1xW_width = 1, int mult_interleave4x4_height = 1, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool broadcast_bias = false)
        : _m(m), _n(n), _k(k), _mult_transpose1xW_width(mult_transpose1xW_width), _mult_interleave4x4_height(mult_interleave4x4_height), _depth_output_gemm3d(depth_output_gemm3d),
          _reinterpret_input_as_3d(reinterpret_input_as_3d), _broadcast_bias(broadcast_bias)
    {
    }
    /** Number of matrix A rows
     *
     * @return the number of matrix A rows
     */
    int m() const
    {
        return _m;
    }
    /** Number of matrix B columns
     *
     * @return the number of matrix B columns
     */
    int n() const
    {
        return _n;
    }
    /** Number of matrix A columns or matrix B rows
     *
     * @return the number of matrix A columns or matrix B rows
     */
    int k() const
    {
        return _k;
    }
    /** Multiplication factor for the width of the 1xW transposed block
     *
     * @return the multiplication factor for the width of the 1xW transposed block
     */
    int mult_transpose1xW_width() const
    {
        return _mult_transpose1xW_width;
    }
    /** Multiplication factor for the height of the 4x4 interleaved block
     *
     * @return the multiplication factor for the height of the 4x4 interleaved block
     */
    int mult_interleave4x4_height() const
    {
        return _mult_interleave4x4_height;
    }
    /** Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
     *
     * @note GEMM3D kernel is used when the output has to be reinterpret as 3D tensor. In that case:
     *       m = depth_output_gemm3d * output_height
     *
     * @return the depth of the output tensor to be used with the GEMM3D kernel
     */
    int depth_output_gemm3d() const
    {
        return _depth_output_gemm3d;
    }
    /** Flag which specifies if the input tensor has to be reinterpreted as 3D
     *
     * @return True if the input tensor has to be reinterpreted as 3D tensor
     */
    bool reinterpret_input_as_3d() const
    {
        return _reinterpret_input_as_3d;
    }
    /** Flag which specifies whether to broadcast the shape of the bias tensor.
     *
     * @return True if the shape of the bias tensor is to be broadcasted.
     */
    bool broadcast_bias() const
    {
        return _broadcast_bias;
    }

private:
    const int  _m;
    const int  _n;
    const int  _k;
    const int  _mult_transpose1xW_width;
    const int  _mult_interleave4x4_height;
    const int  _depth_output_gemm3d;
    const bool _reinterpret_input_as_3d;
    const bool _broadcast_bias;
};
1712
/** Depthwise convolution reshape information: block parameters used when reshaping depthwise weights */
struct DepthwiseConvolutionReshapeInfo
{
    unsigned int c0{ 1 };         /**< Number of channels processed by the depth-wise convolution */
    bool         transpose{ false }; /**< True if the block MxC0 (where M is the area of the filter i.e. KwxKh) has to be transposed */
};
1718
/** GEMMLowp output stage type
 *
 * Selects how the GEMMLowp result is quantized on output; the parameters of the
 * selected stage are carried in @ref GEMMLowpOutputStageInfo.
 */
enum class GEMMLowpOutputStageType
{
    NONE,                     /**< No quantization to uint8 */
    QUANTIZE_DOWN,            /**< Quantize to uint8 using an integer multiplication */
    QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
    QUANTIZE_DOWN_FLOAT       /**< Quantize to uint8 using a floating point multiplication */
};
1727
/** GEMMLowp output stage info
 *
 * Parameters of the output stage selected through @ref GEMMLowpOutputStageType.
 */
struct GEMMLowpOutputStageInfo
{
    GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
    int                     gemmlowp_offset{ 0 };                  /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
    int                     gemmlowp_multiplier{ 0 };              /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
    int                     gemmlowp_shift{ 0 };                   /**< GEMMLowp output stage shift used for quantizing to uint8 */
    int                     gemmlowp_min_bound{ 0 };               /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
    int                     gemmlowp_max_bound{ 0 };               /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
};
1738
/** GEMM LHS (Left Hand Side) matrix information */
struct GEMMLHSMatrixInfo
{
    unsigned int m0{ 1 };            /**< Number of rows processed by the matrix multiplication */
    unsigned int k0{ 1 };            /**< Number of partial accumulations performed by the matrix multiplication */
    unsigned int v0{ 1 };            /**< Number of vertical blocks of size (m0xk0) stored on the same output row */
    bool         transpose{ true };  /**< True if the (m0xk0) block has to be transposed before being stored */
    bool         interleave{ true }; /**< True if the v0 (m0xk0) blocks have to be interleaved in the output row */
};
1748
/** GEMM RHS (Right Hand Side) matrix information */
struct GEMMRHSMatrixInfo
{
    unsigned int n0{ 1 };            /**< Number of columns processed by the matrix multiplication */
    unsigned int k0{ 1 };            /**< Number of partial accumulations performed by the matrix multiplication */
    unsigned int h0{ 1 };            /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */
    bool         transpose{ true };  /**< True if the (k0xn0) block has to be transposed before being stored */
    bool         interleave{ true }; /**< True if the h0 (k0xn0) blocks have to be interleaved in the output row */
};
1758
Gian Marco36a0a462018-01-12 10:21:40 +00001759/** GEMM information class. This class stores the necessary information to compute GEMM functions
1760 *
1761 * This object also contains the information about how matrix A and matrix B have been reshaped
1762 *
1763 */
Chunosov5124be52017-11-22 20:42:13 +07001764class GEMMInfo
1765{
1766public:
1767 /** Default constructor */
Georgios Pinitas37d080f2019-06-21 18:43:12 +01001768 GEMMInfo() noexcept
1769 : _is_a_reshaped(false),
1770 _is_b_reshaped(false),
1771 _reshape_b_only_on_first_run(true),
1772 _depth_output_gemm3d(0),
1773 _reinterpret_input_as_3d(false),
1774 _retain_internal_weights(false),
1775 _gemmlowp_output_stage(),
1776 _fp_mixed_precision(false),
1777 _broadcast_bias(false),
Gian Marco Iodicef3622be2019-07-29 14:27:16 +01001778 _pretranpose_B(true),
1779 _activation_info()
Chunosov5124be52017-11-22 20:42:13 +07001780 {
1781 }
1782 /** Constructor
1783 *
1784 * @param[in] is_a_reshaped True if the matrix A has been reshaped
1785 * @param[in] is_b_reshaped True if the matrix B has been reshaped
1786 * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001787 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001788 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001789 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1790 * to perform 1x1 convolutions with the NHWC data layout)
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001791 * @param[in] retain_internal_weights (Optional) Retain the weights tensor from previous run
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001792 * @param[in] gemmlowp_output_stage (Optional) GEMMLowp Output stage info
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001793 * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
Georgios Pinitasb0f342e2019-05-21 13:32:43 +01001794 * @param[in] broadcast_bias (Optional) Broadcast the shape of the bias tensor from a vector to a matrix.
Gian Marco Iodicef3622be2019-07-29 14:27:16 +01001795 * @param[in] activation_info (Optional) Activation to apply after the matrix multiplication
Chunosov5124be52017-11-22 20:42:13 +07001796 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001797 GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
Gian Marco Iodicef3622be2019-07-29 14:27:16 +01001798 GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false, bool broadcast_bias = false,
1799 const ActivationLayerInfo &activation_info = ActivationLayerInfo()) noexcept
Georgios Pinitas37d080f2019-06-21 18:43:12 +01001800 : _is_a_reshaped(is_a_reshaped),
1801 _is_b_reshaped(is_b_reshaped),
1802 _reshape_b_only_on_first_run(reshape_b_only_on_first_run),
1803 _depth_output_gemm3d(depth_output_gemm3d),
1804 _reinterpret_input_as_3d(reinterpret_input_as_3d),
1805 _retain_internal_weights(retain_internal_weights),
1806 _gemmlowp_output_stage(gemmlowp_output_stage),
1807 _fp_mixed_precision(fp_mixed_precision),
1808 _broadcast_bias(broadcast_bias),
Gian Marco Iodicef3622be2019-07-29 14:27:16 +01001809 _pretranpose_B(reshape_b_only_on_first_run),
1810 _activation_info(activation_info)
Chunosov5124be52017-11-22 20:42:13 +07001811 {
1812 }
1813 /** Flag which specifies if the matrix A has been reshaped
1814 *
1815 * @return True if the matrix A has been reshaped
1816 */
1817 bool is_a_reshaped() const
1818 {
1819 return _is_a_reshaped;
1820 };
1821 /** Flag which specifies if the matrix B has been reshaped
1822 *
1823 * @return True if the matrix B has been reshaped
1824 */
1825 bool is_b_reshaped() const
1826 {
1827 return _is_b_reshaped;
1828 };
1829 /** Flag which specifies if the reshape of matrix B should executed only for the first
1830 *
1831 * @note This flag could be set to TRUE when GEMM is used to accelerate convolution layer
1832 *
1833 * @return True if the reshaped of matrix B happens only for the first run
1834 */
1835 bool reshape_b_only_on_first_run() const
1836 {
1837 return _reshape_b_only_on_first_run;
1838 };
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001839 /** Depth of the output when GEMM output is reinterpreted as 3D tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001840 *
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001841 * @return the depth of the output tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001842 */
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001843 int depth_output_gemm3d() const
Gian Marco36a0a462018-01-12 10:21:40 +00001844 {
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001845 return _depth_output_gemm3d;
1846 };
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001847 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1848 *
1849 * @return True if the input tensor has to be reinterpreted as 3D tensor
1850 */
1851 bool reinterpret_input_as_3d() const
1852 {
1853 return _reinterpret_input_as_3d;
1854 };
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001855 /** Flag which specifies if the weights tensor has to be retained from previous run
1856 *
1857 * @return True if the weights tensor has to be retained
1858 */
1859 bool retain_internal_weights() const
1860 {
1861 return _retain_internal_weights;
1862 };
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001863 /** GEMMLowp output stage
1864 *
1865 * @return the GEMMLowp output stage info
1866 */
1867 GEMMLowpOutputStageInfo gemmlowp_output_stage() const
1868 {
1869 return _gemmlowp_output_stage;
1870 };
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001871 /** Flag which specifies if a wider accumulator should be used.
1872 *
1873 * @return True if a wider accumulator has to be used
1874 */
1875 bool fp_mixed_precision() const
1876 {
1877 return _fp_mixed_precision;
1878 };
Georgios Pinitasb0f342e2019-05-21 13:32:43 +01001879 /** Flag which specifies whether to broadcast the shape of the bias tensor.
1880 *
1881 * @return True if the shape of the bias tensor is to be broadcasted.
1882 */
1883 bool broadcast_bias() const
1884 {
1885 return _broadcast_bias;
1886 };
Georgios Pinitas37d080f2019-06-21 18:43:12 +01001887 /** Flag which specifies whether b should be pre-transposed if supported.
1888 *
1889 * @return True if b should be pre-transposed else false.
1890 */
1891 bool pretranpose_B() const
1892 {
1893 return _pretranpose_B;
1894 };
1895 /** Set pre-transpose b flag
1896 *
1897 * @param[in] flag Flag to set
1898 */
1899 void set_pretranpose_B(bool flag)
1900 {
1901 _pretranpose_B = flag;
1902 }
Gian Marco Iodicef3622be2019-07-29 14:27:16 +01001903 /** Activation layer to apply after the matrix multiplication
1904 *
1905 * @return ActivationLayerInfo object
1906 */
1907 ActivationLayerInfo activation_info() const
1908 {
1909 return _activation_info;
1910 }
Chunosov5124be52017-11-22 20:42:13 +07001911
1912private:
Georgios Pinitas37d080f2019-06-21 18:43:12 +01001913 bool _is_a_reshaped;
1914 bool _is_b_reshaped;
1915 bool _reshape_b_only_on_first_run;
1916 int _depth_output_gemm3d;
1917 bool _reinterpret_input_as_3d;
1918 bool _retain_internal_weights;
1919 GEMMLowpOutputStageInfo _gemmlowp_output_stage;
1920 bool _fp_mixed_precision;
1921 bool _broadcast_bias;
1922 bool _pretranpose_B;
Gian Marco Iodicef3622be2019-07-29 14:27:16 +01001923 ActivationLayerInfo _activation_info;
Chunosov5124be52017-11-22 20:42:13 +07001924};
1925
/** Winograd information */
struct WinogradInfo
{
    /** Constructor
     *
     * @param[in] output_tile_sz Width and height of the output tile
     * @param[in] kernel_sz      Width and height of the kernel
     * @param[in] input_dims     Width and height of the input tensor before the convolution is applied
     * @param[in] conv_info      Convolution info (Pads, strides)
     * @param[in] data_layout    Data layout to use for the output tensor once the convolution has been applied
     */
    WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
        : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
    {
    }

    Size2D        output_tile_size{};                     /**< Width and height of the output tile */
    Size2D        kernel_size{};                          /**< Width and height of the kernel */
    Size2D        input_dimensions{};                     /**< Width and height of the input tensor before the convolution is applied */
    PadStrideInfo convolution_info{};                     /**< Convolution info (Pads, strides,...) */
    DataLayout    output_data_layout{ DataLayout::NCHW }; /**< Data layout to use for the output tensor once the convolution has been applied (NCHW or NHWC) */
};
1948
/** IO formatting information class*/
struct IOFormatInfo
{
    /** Precision type used when printing floating point numbers */
    enum class PrecisionType
    {
        Default, /**< Default precision to the one that the current stream has */
        Custom,  /**< Custom precision specified by the user using the precision parameter */
        Full     /**< The maximum precision of the floating point representation */
    };

    /** Specifies the area to be printed, used by Tensor objects */
    enum class PrintRegion
    {
        ValidRegion, /**< Prints the valid region of the Tensor object */
        NoPadding,   /**< Prints the Tensor object without the padding */
        Full         /**< Print the tensor object including padding */
    };

    /** Construct a set of IO formatting information.
     *
     * @param[in] print_region   Area to be printed. Used by Tensor objects. Default: ValidRegion.
     * @param[in] precision_type Precision type for floating point numbers. Default: stream default.
     * @param[in] precision      Precision value for floating point numbers. Default: 10.
     * @param[in] align_columns  Whether to align columns when printed. Default: true.
     * @param[in] element_delim  Delimiter between elements. Default: " ".
     * @param[in] row_delim      Delimiter between rows. Default: "\n".
     */
    IOFormatInfo(PrintRegion   print_region   = PrintRegion::ValidRegion,
                 PrecisionType precision_type = PrecisionType::Default,
                 unsigned int  precision      = 10,
                 bool          align_columns  = true,
                 std::string   element_delim  = " ",
                 std::string   row_delim      = "\n")
        : print_region(print_region),
          precision_type(precision_type),
          precision(precision),
          element_delim(std::move(element_delim)), // by-value parameter moved into place to avoid an extra copy
          row_delim(std::move(row_delim)),
          align_columns(align_columns)
    {
    }

    /** Area to be printed by Tensor objects */
    PrintRegion print_region;
    /** Floating point precision type */
    PrecisionType precision_type;
    /** Floating point precision */
    unsigned int precision;
    /** Element delimiter */
    std::string element_delim;
    /** Row delimiter */
    std::string row_delim;
    /** Align columns */
    bool align_columns;
};
Georgios Pinitasd8734b52017-12-22 15:27:52 +00002005} // namespace arm_compute
Anthony Barbier6ff3b192017-09-04 18:44:23 +01002006#endif /* __ARM_COMPUTE_TYPES_H__ */