/*
 * Copyright (c) 2016-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TYPES_H__
#define __ARM_COMPUTE_TYPES_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/QAsymm8.h"
#include "arm_compute/core/Rounding.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "support/Half.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

namespace arm_compute
{
/** 16-bit floating point type */
using half = half_float::half;

/** Permutation vector */
using PermutationVector = Strides;
/** Bidirectional strides */
using BiStrides = Coordinates;

/** Image colour formats */
enum class Format
{
    UNKNOWN,  /**< Unknown image format */
    U8,       /**< 1 channel, 1 U8 per channel */
    S16,      /**< 1 channel, 1 S16 per channel */
    U16,      /**< 1 channel, 1 U16 per channel */
    S32,      /**< 1 channel, 1 S32 per channel */
    U32,      /**< 1 channel, 1 U32 per channel */
    F16,      /**< 1 channel, 1 F16 per channel */
    F32,      /**< 1 channel, 1 F32 per channel */
    UV88,     /**< 2 channels, 1 U8 per channel */
    RGB888,   /**< 3 channels, 1 U8 per channel */
    RGBA8888, /**< 4 channels, 1 U8 per channel */
    YUV444,   /**< A 3-plane format of 8-bit 4:4:4 sampled Y, U, V planes */
    YUYV422,  /**< A single plane of 32-bit macro pixel of Y0, U0, Y1, V0 bytes */
    NV12,     /**< A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling */
    NV21,     /**< A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling */
    IYUV,     /**< A 3-plane format of 8-bit 4:2:0 sampled Y, U, V planes */
    UYVY422   /**< A single plane of 32-bit macro pixel of U0, Y0, V0, Y1 bytes */
};

/** Available data types */
enum class DataType
{
    UNKNOWN, /**< Unknown data type */
    U8,      /**< unsigned 8-bit number */
    S8,      /**< signed 8-bit number */
    QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number */
    U16,     /**< unsigned 16-bit number */
    S16,     /**< signed 16-bit number */
    U32,     /**< unsigned 32-bit number */
    S32,     /**< signed 32-bit number */
    U64,     /**< unsigned 64-bit number */
    S64,     /**< signed 64-bit number */
    F16,     /**< 16-bit floating-point number */
    F32,     /**< 32-bit floating-point number */
    F64,     /**< 64-bit floating-point number */
    SIZET    /**< size_t */
};

/** Available Sampling Policies */
enum class SamplingPolicy
{
    CENTER,  /**< Samples are taken at pixel center */
    TOP_LEFT /**< Samples are taken at pixel top left corner */
};

/** Constant value of the border pixels when using BorderMode::CONSTANT */
constexpr uint8_t CONSTANT_BORDER_VALUE = 199;

/** Constant value used to indicate a half-scale pyramid */
constexpr float SCALE_PYRAMID_HALF = 0.5f;

/** Constant value used to indicate an ORB scaled pyramid */
constexpr float SCALE_PYRAMID_ORB = 8.408964152537146130583778358414e-01;

/** [DataLayout enum definition] **/

/** Supported tensor data layouts */
enum class DataLayout
{
    UNKNOWN, /**< Unknown data layout */
    NCHW,    /**< Num samples, channels, height, width */
    NHWC     /**< Num samples, height, width, channels */
};
/** [DataLayout enum definition] **/

/** Supported tensor data layout dimensions */
enum class DataLayoutDimension
{
    CHANNEL, /**< channel */
    HEIGHT,  /**< height */
    WIDTH,   /**< width */
    BATCHES  /**< batches */
};

/** Available ConvolutionMethod */
enum class ConvolutionMethod
{
    GEMM,    /**< Convolution using GEMM */
    DIRECT,  /**< Direct convolution */
    WINOGRAD /**< Convolution using Winograd */
};

/** Padding mode to use for PadLayer */
enum class PaddingMode
{
    CONSTANT,
    REFLECT,
    SYMMETRIC
};

/** Supported comparison operations */
enum class ComparisonOperation
{
    Equal,        /**< Equal comparison ( \f$ x == y \f$ ) */
    NotEqual,     /**< NotEqual comparison ( \f$ x != y \f$ ) */
    Greater,      /**< Greater comparison ( \f$ x > y \f$ ) */
    GreaterEqual, /**< Greater equal comparison ( \f$ x >= y \f$ ) */
    Less,         /**< Less comparison ( \f$ x < y \f$ ) */
    LessEqual     /**< Less equal comparison ( \f$ x <= y \f$ ) */
};

/** Quantization settings (used for QASYMM8 data type) */
struct QuantizationInfo
{
    /** Default constructor */
    QuantizationInfo() noexcept
        : scale(0.0f),
          offset(0)
    {
    }

    /** Construct quantization info.
     *
     * @param[in] scale  Scale.
     * @param[in] offset Offset.
     */
    QuantizationInfo(float scale, int offset)
        : scale(scale), offset(offset)
    {
    }

    /** Check whether equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is the same.
     */
    bool operator==(const QuantizationInfo &other) const
    {
        return scale == other.scale && offset == other.offset;
    }

    /** Check whether not equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is not the same.
     */
    bool operator!=(const QuantizationInfo &other) const
    {
        return !(*this == other);
    }

    float scale;  /**< scale */
    int   offset; /**< offset */

    /** Quantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value           Value to quantize.
     * @param[in] rounding_policy Policy to use when rounding.
     *
     * @return the quantized value.
     */
    qasymm8_t quantize(float value, RoundingPolicy rounding_policy) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::quantize: scale == 0");
        return sqcvt_qasymm8_f32(value, scale, offset, rounding_policy);
    }

    /** Dequantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value Value to dequantize.
     *
     * @return the original value before quantization.
     */
    float dequantize(qasymm8_t value) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::dequantize: scale == 0");
        return scvt_f32_qasymm8(value, scale, offset);
    }

    /** Indicates whether this QuantizationInfo has valid settings or not
     *
     * @return True if this QuantizationInfo has invalid settings (i.e. the scale is zero).
     */
    bool empty() const
    {
        return scale == 0;
    }
};
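
// Illustrative usage sketch (not part of the upstream header): with scale 0.1 and
// offset 128, QuantizationInfo maps real values to qasymm8_t codes and back.
// RoundingPolicy::TO_NEAREST_UP is assumed to be provided by Rounding.h.
//
//   QuantizationInfo qinfo(0.1f, 128);
//   qasymm8_t q = qinfo.quantize(2.5f, RoundingPolicy::TO_NEAREST_UP); // 2.5 / 0.1 + 128 = 153
//   float     r = qinfo.dequantize(q);                                 // (153 - 128) * 0.1 = 2.5f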

/** Container for valid region of a window */
struct ValidRegion
{
    /** Default constructor */
    ValidRegion()
        : anchor{}, shape{}
    {
    }

    /** Allow instances of this class to be copy constructed */
    ValidRegion(const ValidRegion &) = default;
    /** Allow instances of this class to be move constructed */
    ValidRegion(ValidRegion &&) = default;
    /** Allow instances of this class to be copied */
    ValidRegion &operator=(const ValidRegion &) = default;
    /** Allow instances of this class to be moved */
    ValidRegion &operator=(ValidRegion &&) = default;
    /** Default destructor */
    ~ValidRegion() = default;

    /** Constructor for a valid region with default number of dimensions
     *
     * @param[in] an_anchor Anchor for the start of the valid region.
     * @param[in] a_shape   Shape of the valid region.
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
    }

    /** Constructor for a valid region with specified number of dimensions
     *
     * @param[in] an_anchor      Anchor for the start of the valid region.
     * @param[in] a_shape        Shape of the valid region.
     * @param[in] num_dimensions Number of dimensions (must be >= number of dimensions of anchor and shape).
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
        anchor.set_num_dimensions(num_dimensions);
    }

    /** Return the start of the valid region for the given dimension @p d */
    int start(unsigned int d) const
    {
        return anchor[d];
    }

    /** Return the end of the valid region for the given dimension @p d */
    int end(unsigned int d) const
    {
        return anchor[d] + shape[d];
    }

    /** Accessor to set the value of anchor and shape for one of the dimensions.
     *
     * @param[in] dimension Dimension for which the value is set.
     * @param[in] start     Value to be set in anchor for the dimension.
     * @param[in] size      Value to be set in shape for the dimension.
     *
     * @return *this.
     */
    ValidRegion &set(size_t dimension, int start, size_t size)
    {
        anchor.set(dimension, start);
        shape.set(dimension, size);
        return *this;
    }

    Coordinates anchor; /**< Anchor for the start of the valid region. */
    TensorShape shape;  /**< Shape of the valid region. */
};
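
// Illustrative usage sketch (not part of the upstream header): a 2D valid region
// anchored at (1, 2) with a 5x5 shape; start()/end() give per-dimension bounds.
//
//   ValidRegion region(Coordinates(1, 2), TensorShape(5U, 5U));
//   int x0 = region.start(0); // 1
//   int x1 = region.end(0);   // 1 + 5 = 6
//   region.set(1, 0, 7);      // dimension 1 now spans [0, 7)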

/** Methods available to handle borders */
enum class BorderMode
{
    UNDEFINED, /**< Borders are left undefined */
    CONSTANT,  /**< Pixels outside the image are assumed to have a constant value */
    REPLICATE  /**< Pixels outside the image are assumed to have the same value as the closest image pixel */
};

/** Container for 2D border size */
struct BorderSize
{
    /** Empty border, i.e. no border */
    constexpr BorderSize()
        : top{ 0 }, right{ 0 }, bottom{ 0 }, left{ 0 }
    {
    }

    /** Border with equal size around the 2D plane */
    explicit constexpr BorderSize(unsigned int size)
        : top{ size }, right{ size }, bottom{ size }, left{ size }
    {
    }

    /** Border with same size for top/bottom and left/right */
    constexpr BorderSize(unsigned int top_bottom, unsigned int left_right)
        : top{ top_bottom }, right{ left_right }, bottom{ top_bottom }, left{ left_right }
    {
    }

    /** Border with different sizes */
    constexpr BorderSize(unsigned int top, unsigned int right, unsigned int bottom, unsigned int left)
        : top{ top }, right{ right }, bottom{ bottom }, left{ left }
    {
    }

    /** Check if the entire border is zero */
    constexpr bool empty() const
    {
        return top == 0 && right == 0 && bottom == 0 && left == 0;
    }

    /** Check if the border is the same size on all sides */
    constexpr bool uniform() const
    {
        return top == right && top == bottom && top == left;
    }

    /** Scale this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return *this.
     */
    BorderSize &operator*=(float scale)
    {
        top *= scale;
        right *= scale;
        bottom *= scale;
        left *= scale;

        return *this;
    }

    /** Scale a copy of this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return a scaled copy of this.
     */
    BorderSize operator*(float scale)
    {
        BorderSize size = *this;
        size *= scale;

        return size;
    }

    /** Limit this border size.
     *
     * @param[in] limit Border size to limit this border size to.
     */
    void limit(const BorderSize &limit)
    {
        top    = std::min(top, limit.top);
        right  = std::min(right, limit.right);
        bottom = std::min(bottom, limit.bottom);
        left   = std::min(left, limit.left);
    }

    unsigned int top;    /**< top of the border */
    unsigned int right;  /**< right of the border */
    unsigned int bottom; /**< bottom of the border */
    unsigned int left;   /**< left of the border */
};
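
// Illustrative usage sketch (not part of the upstream header): per-side borders,
// clamping with limit() and scaling with operator*.
//
//   BorderSize border(1, 2, 3, 4);     // top = 1, right = 2, bottom = 3, left = 4
//   border.limit(BorderSize(2));       // each side clamped to at most 2 -> {1, 2, 2, 2}
//   BorderSize doubled = border * 2.f; // {2, 4, 4, 4}; border itself is unchanged
//   bool none = doubled.empty();       // false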

/** Container for 2D padding size */
using PaddingSize = BorderSize;

/** Policy to handle overflow */
enum class ConvertPolicy
{
    WRAP,    /**< Wrap around */
    SATURATE /**< Saturate */
};

/** Interpolation method */
enum class InterpolationPolicy
{
    NEAREST_NEIGHBOR, /**< Output values are defined to match the source pixel whose center is nearest to the sample position */
    BILINEAR,         /**< Output values are defined by bilinear interpolation between the pixels */
    AREA,             /**< Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image */
};

/** Bilinear Interpolation method used by LKTracker */
enum class BilinearInterpolation
{
    BILINEAR_OLD_NEW, /**< Old-new method */
    BILINEAR_SCHARR   /**< Scharr method */
};

/** Threshold mode */
enum class ThresholdType
{
    BINARY, /**< Threshold with one value */
    RANGE   /**< Threshold with two values */
};

/** Termination criteria */
enum class Termination
{
    TERM_CRITERIA_EPSILON,    /**< Terminate when within epsilon of a threshold */
    TERM_CRITERIA_ITERATIONS, /**< Terminate after a maximum number of iterations */
    TERM_CRITERIA_BOTH        /**< Terminate on whichever of the other conditions occurs first */
};

/** Magnitude calculation type. */
enum class MagnitudeType
{
    L1NORM, /**< L1 normalization type */
    L2NORM  /**< L2 normalization type */
};

/** Phase calculation type.
 *
 * @note When PhaseType == SIGNED, each angle is mapped to the range 0 to 255 inclusive; otherwise angles between 0 and 180 are used.
 */
enum class PhaseType
{
    SIGNED,  /**< Angle range: [0, 360] */
    UNSIGNED /**< Angle range: [0, 180] */
};

/** Keypoint type */
struct KeyPoint
{
    int32_t x{ 0 };               /**< X coordinates */
    int32_t y{ 0 };               /**< Y coordinates */
    float   strength{ 0.f };      /**< Strength of the point */
    float   scale{ 0.f };         /**< Scale initialized to 0 by the corner detector */
    float   orientation{ 0.f };   /**< Orientation initialized to 0 by the corner detector */
    int32_t tracking_status{ 0 }; /**< Status initialized to 1 by the corner detector, set to 0 when the point is lost */
    float   error{ 0.f };         /**< Tracking error initialized to 0 by the corner detector */
};

/** Internal key point */
using InternalKeypoint = std::tuple<float, float, float>; /* x,y,strength */

/** Rectangle type */
struct Rectangle
{
    uint16_t x;      /**< Top-left x coordinate */
    uint16_t y;      /**< Top-left y coordinate */
    uint16_t width;  /**< Width of the rectangle */
    uint16_t height; /**< Height of the rectangle */
};

/** Coordinate type */
struct Coordinates2D
{
    int32_t x; /**< X coordinates */
    int32_t y; /**< Y coordinates */
};

/** Coordinate type */
struct Coordinates3D
{
    uint32_t x; /**< X coordinates */
    uint32_t y; /**< Y coordinates */
    uint32_t z; /**< Z coordinates */
};

/** Padding information as a pair of unsigned int start/end */
using PaddingInfo = std::pair<uint32_t, uint32_t>;

/** List of padding information */
using PaddingList = std::vector<PaddingInfo>;

/** Information to produce a tiled version of a Tensor */
using Multiples = std::vector<uint32_t>;

/** Available channels */
enum class Channel
{
    UNKNOWN, /**< Unknown channel format */
    C0,      /**< First channel (used by formats with unknown channel types). */
    C1,      /**< Second channel (used by formats with unknown channel types). */
    C2,      /**< Third channel (used by formats with unknown channel types). */
    C3,      /**< Fourth channel (used by formats with unknown channel types). */
    R,       /**< Red channel. */
    G,       /**< Green channel. */
    B,       /**< Blue channel. */
    A,       /**< Alpha channel. */
    Y,       /**< Luma channel. */
    U,       /**< Cb/U channel. */
    V        /**< Cr/V/Value channel. */
};

/** Available matrix patterns */
enum class MatrixPattern
{
    BOX,   /**< Box pattern matrix. */
    CROSS, /**< Cross pattern matrix. */
    DISK,  /**< Disk pattern matrix. */
    OTHER  /**< Any other matrix pattern. */
};

/** Available non linear functions. */
enum class NonLinearFilterFunction : unsigned
{
    MEDIAN = 0, /**< Non linear median filter. */
    MIN    = 1, /**< Non linear erode. */
    MAX    = 2, /**< Non linear dilate. */
};

/** Available reduction operations */
enum class ReductionOperation
{
    ARG_IDX_MAX, /**< Index of the max value */
    ARG_IDX_MIN, /**< Index of the min value */
    MEAN_SUM,    /**< Mean of sum */
    PROD,        /**< Product */
    SUM_SQUARE,  /**< Sum of squares */
    SUM          /**< Sum */
};

/** Available element-wise operations */
enum class ArithmeticOperation
{
    ADD,          /**< (x + y) */
    SUB,          /**< (x - y) */
    DIV,          /**< (x / y) */
    MIN,          /**< Min(x, y) */
    MAX,          /**< Max(x, y) */
    SQUARED_DIFF, /**< (x - y)^2 */
};

/** Available element wise unary operations */
enum class ElementWiseUnary
{
    RSQRT, /**< Reciprocal square root */
    EXP,   /**< Exponential */
};

/** The normalization type used for the normalization layer */
enum class NormType
{
    IN_MAP_1D, /**< Normalization applied within the same map in 1D region */
    IN_MAP_2D, /**< Normalization applied within the same map in 2D region */
    CROSS_MAP  /**< Normalization applied cross maps */
};

/** Normalization type for Histogram of Oriented Gradients (HOG) */
enum class HOGNormType
{
    L2_NORM    = 1, /**< L2-norm */
    L2HYS_NORM = 2, /**< L2-norm followed by clipping */
    L1_NORM    = 3  /**< L1 norm */
};

/** Detection window used for the object detection. The detection window keeps the following information:
 *
 * -# Geometry of the rectangular window (x/y of top-left corner and width/height)
 * -# Index of the class used for evaluating which class the detection window belongs to
 * -# Confidence value (score) obtained with the classifier
 */
struct DetectionWindow
{
    uint16_t x{ 0 };         /**< Top-left x coordinate */
    uint16_t y{ 0 };         /**< Top-left y coordinate */
    uint16_t width{ 0 };     /**< Width of the detection window */
    uint16_t height{ 0 };    /**< Height of the detection window */
    uint16_t idx_class{ 0 }; /**< Index of the class */
    float    score{ 0.f };   /**< Confidence value for the detection window */
};

/** Dimension rounding type when down-scaling on CNNs
 * @note Used in pooling and convolution layer
 */
enum class DimensionRoundingType
{
    FLOOR, /**< Floor rounding */
    CEIL   /**< Ceil rounding */
};

/** Available pooling types */
enum class PoolingType
{
    MAX, /**< Max Pooling */
    AVG, /**< Average Pooling */
    L2   /**< L2 Pooling */
};

/** Available non maxima suppression types */
enum class NMSType
{
    LINEAR,   /**< Linear NMS */
    GAUSSIAN, /**< Gaussian NMS */
    ORIGINAL  /**< Original NMS */
};

/** BoxWithNonMaximaSuppressionLimit Information class */
class BoxNMSLimitInfo final
{
public:
    /** Constructor
     *
     * @param[in] score_thresh             (Optional) Score threshold.
     * @param[in] nms                      (Optional) NMS value.
     * @param[in] detections               (Optional) Number of detections.
     * @param[in] soft_nms_enabled         (Optional) Enable SoftNMS.
     * @param[in] soft_nms_method          (Optional) Soft NMS method.
     * @param[in] soft_nms_sigma           (Optional) Soft NMS sigma value.
     * @param[in] soft_nms_min_score_thres (Optional) Soft NMS minimum score threshold.
     * @param[in] suppress_size            (Optional) Filter out boxes based on their size. Defaults to false.
     * @param[in] min_size                 (Optional) Boxes smaller than min_size will be filtered out. Defaults to 1.
     * @param[in] im_width                 (Optional) Boxes whose centers (on the x axis) are beyond im_width will be filtered. Defaults to 1.
     * @param[in] im_height                (Optional) Boxes whose centers (on the y axis) are beyond im_height will be filtered. Defaults to 1.
     */
    BoxNMSLimitInfo(float score_thresh = 0.05f, float nms = 0.3f,
                    int detections = 100, bool soft_nms_enabled = false,
                    NMSType soft_nms_method = NMSType::LINEAR,
                    float soft_nms_sigma = 0.5f, float soft_nms_min_score_thres = 0.001f, bool suppress_size = false, float min_size = 1.0f, float im_width = 1.0f, float im_height = 1.0f)
        : _score_thresh(score_thresh), _nms(nms), _detections_per_im(detections), _soft_nms_enabled(soft_nms_enabled), _soft_nms_method(soft_nms_method), _soft_nms_sigma(soft_nms_sigma),
          _soft_nms_min_score_thres(soft_nms_min_score_thres), _suppress_size(suppress_size), _min_size(min_size), _im_width(im_width), _im_height(im_height)
    {
    }
    /** Get the score threshold */
    float score_thresh() const
    {
        return _score_thresh;
    }
    /** Get the NMS */
    float nms() const
    {
        return _nms;
    }
    /** Get the number of detections */
    int detections_per_im() const
    {
        return _detections_per_im;
    }
    /** Check if soft NMS is enabled */
    bool soft_nms_enabled() const
    {
        return _soft_nms_enabled;
    }
    /** Get soft NMS method */
    NMSType soft_nms_method() const
    {
        return _soft_nms_method;
    }
    /** Get soft NMS sigma */
    float soft_nms_sigma() const
    {
        return _soft_nms_sigma;
    }
    /** Get soft nms min score threshold */
    float soft_nms_min_score_thres() const
    {
        return _soft_nms_min_score_thres;
    }
    /** Get if NMS will suppress boxes based on their size/position */
    bool suppress_size() const
    {
        return _suppress_size;
    }
    /** Get size suppression threshold */
    float min_size() const
    {
        return _min_size;
    }
    /** Get image width (NMS may suppress boxes whose center sits beyond the image width) */
    float im_width() const
    {
        return _im_width;
    }
    /** Get image height (NMS may suppress boxes whose center sits beyond the image height) */
    float im_height() const
    {
        return _im_height;
    }

private:
    float   _score_thresh;
    float   _nms;
    int     _detections_per_im;
    bool    _soft_nms_enabled;
    NMSType _soft_nms_method;
    float   _soft_nms_sigma;
    float   _soft_nms_min_score_thres;
    bool    _suppress_size;
    float   _min_size;
    float   _im_width;
    float   _im_height;
};

/** Padding and stride information class */
class PadStrideInfo
{
public:
    /** Constructor
     *
     * @param[in] stride_x (Optional) Stride, in elements, across x. Defaults to 1.
     * @param[in] stride_y (Optional) Stride, in elements, across y. Defaults to 1.
     * @param[in] pad_x    (Optional) Padding, in elements, across x. Defaults to 0.
     * @param[in] pad_y    (Optional) Padding, in elements, across y. Defaults to 0.
     * @param[in] round    (Optional) Dimensions rounding. Defaults to @ref DimensionRoundingType::FLOOR.
     */
    PadStrideInfo(unsigned int stride_x = 1, unsigned int stride_y = 1,
                  unsigned int pad_x = 0, unsigned int pad_y = 0,
                  DimensionRoundingType round = DimensionRoundingType::FLOOR)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_x),
          _pad_top(pad_y),
          _pad_right(pad_x),
          _pad_bottom(pad_y),
          _round_type(round)
    {
    }
    /** Constructor
     *
     * @param[in] stride_x   Stride, in elements, across x.
     * @param[in] stride_y   Stride, in elements, across y.
     * @param[in] pad_left   Padding across x on the left, in elements.
     * @param[in] pad_right  Padding across x on the right, in elements.
     * @param[in] pad_top    Padding across y on the top, in elements.
     * @param[in] pad_bottom Padding across y on the bottom, in elements.
     * @param[in] round      Dimensions rounding.
     */
    PadStrideInfo(unsigned int stride_x, unsigned int stride_y,
                  unsigned int pad_left, unsigned int pad_right,
                  unsigned int pad_top, unsigned int pad_bottom,
                  DimensionRoundingType round)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_left),
          _pad_top(pad_top),
          _pad_right(pad_right),
          _pad_bottom(pad_bottom),
          _round_type(round)
    {
    }
    /** Get the stride.
     *
     * @return a pair: stride x, stride y.
     */
    std::pair<unsigned int, unsigned int> stride() const
    {
        return _stride;
    }
    /** Check whether the padding is symmetric.
     *
     * @return True if the padding is symmetric.
     */
    bool padding_is_symmetric() const
    {
        return (_pad_left == _pad_right) && (_pad_top == _pad_bottom);
    }
    /** Get the padding.
     *
     * @note This should only be used when the padding is symmetric.
     *
     * @return a pair: padding left/right, padding top/bottom
     */
    std::pair<unsigned int, unsigned int> pad() const
    {
        // This accessor should be used only when padding is symmetric
        ARM_COMPUTE_ERROR_ON(!padding_is_symmetric());
        return std::make_pair(_pad_left, _pad_top);
    }

    /** Get the left padding */
    unsigned int pad_left() const
    {
        return _pad_left;
    }
    /** Get the right padding */
    unsigned int pad_right() const
    {
        return _pad_right;
    }
    /** Get the top padding */
    unsigned int pad_top() const
    {
        return _pad_top;
    }
    /** Get the bottom padding */
    unsigned int pad_bottom() const
    {
        return _pad_bottom;
    }

    /** Get the rounding type */
    DimensionRoundingType round() const
    {
        return _round_type;
    }

    /** Check whether this has any padding */
    bool has_padding() const
    {
        return (_pad_left != 0 || _pad_top != 0 || _pad_right != 0 || _pad_bottom != 0);
    }

private:
    std::pair<unsigned int, unsigned int> _stride;
    unsigned int _pad_left;
    unsigned int _pad_top;
    unsigned int _pad_right;
    unsigned int _pad_bottom;

    DimensionRoundingType _round_type;
};
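
// Illustrative usage sketch (not part of the upstream header): stride 2 with
// asymmetric "same"-style padding; the asymmetric constructor takes
// (stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom, rounding).
//
//   PadStrideInfo conv_pad(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR);
//   bool symmetric = conv_pad.padding_is_symmetric(); // false
//   unsigned int r = conv_pad.pad_right();            // 1
//   // conv_pad.pad() would assert here because the padding is asymmetric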

/** Fully connected layer info */
struct FullyConnectedLayerInfo
{
    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */

    /** Sets the weights trained data layout
     *
     * @param[in] layout Data layout that the weights were trained with
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_weights_trained_layout(DataLayout layout)
    {
        weights_trained_layout = layout;
        return *this;
    }
    /** Sets the transpose weights flag
     *
     * @param[in] should_transpose_weights Boolean flag indicating if weights should be transposed
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_transpose_weights(bool should_transpose_weights)
    {
        transpose_weights = should_transpose_weights;
        return *this;
    }
};

/** PriorBox layer info */
class PriorBoxLayerInfo final
{
public:
    /** Default Constructor */
    PriorBoxLayerInfo()
        : _min_sizes(),
          _variances(),
          _offset(),
          _flip(true),
          _clip(false),
          _max_sizes(),
          _aspect_ratios(),
          _img_size(),
          _steps()
    {
    }
    /** Constructor
     *
     * @param[in] min_sizes     Min sizes vector.
     * @param[in] variances     Variances vector.
     * @param[in] offset        Offset value.
     * @param[in] flip          (Optional) Flip the aspect ratios.
     * @param[in] clip          (Optional) Clip coordinates so that they're within [0,1].
     * @param[in] max_sizes     (Optional) Max sizes vector.
     * @param[in] aspect_ratios (Optional) Aspect ratios of the boxes.
     * @param[in] img_size      (Optional) Image size.
     * @param[in] steps         (Optional) Step values.
     */
    PriorBoxLayerInfo(const std::vector<float> &min_sizes, const std::vector<float> &variances, float offset, bool flip = true, bool clip = false,
                      const std::vector<float> &max_sizes = {}, const std::vector<float> &aspect_ratios = {},
                      const Coordinates2D &img_size = Coordinates2D{ 0, 0 }, const std::array<float, 2> &steps = { { 0.f, 0.f } })
        : _min_sizes(min_sizes),
          _variances(variances),
          _offset(offset),
          _flip(flip),
          _clip(clip),
          _max_sizes(max_sizes),
          _aspect_ratios(),
          _img_size(img_size),
          _steps(steps)
    {
        _aspect_ratios.push_back(1.);
        for(unsigned int i = 0; i < aspect_ratios.size(); ++i)
        {
            float ar            = aspect_ratios[i];
            bool  already_exist = false;
            for(auto ar_new : _aspect_ratios)
            {
                if(fabs(ar - ar_new) < 1e-6)
                {
                    already_exist = true;
                    break;
                }
            }
            if(!already_exist)
            {
                _aspect_ratios.push_back(ar);
                if(flip)
                {
                    _aspect_ratios.push_back(1.f / ar);
                }
            }
        }
    }
    /** Get min sizes. */
    std::vector<float> min_sizes() const
    {
        return _min_sizes;
    }
    /** Get min variances. */
    std::vector<float> variances() const
    {
        return _variances;
    }
    /** Get the step coordinates */
    std::array<float, 2> steps() const
    {
        return _steps;
    }
    /** Get the image size coordinates */
    Coordinates2D img_size() const
    {
        return _img_size;
    }
    /** Get the offset */
    float offset() const
    {
        return _offset;
    }
    /** Get the flip value */
    bool flip() const
    {
        return _flip;
    }
    /** Get the clip value */
    bool clip() const
    {
        return _clip;
    }
    /** Get max sizes. */
    std::vector<float> max_sizes() const
    {
        return _max_sizes;
    }
    /** Get aspect ratios. */
    std::vector<float> aspect_ratios() const
    {
        return _aspect_ratios;
    }

private:
    std::vector<float>   _min_sizes;
    std::vector<float>   _variances;
    float                _offset;
    bool                 _flip;
    bool                 _clip;
    std::vector<float>   _max_sizes;
    std::vector<float>   _aspect_ratios;
    Coordinates2D        _img_size;
    std::array<float, 2> _steps;
};

/** Available Detection Output code types */
enum class DetectionOutputLayerCodeType
{
    CORNER,      /**< Use box corners */
    CENTER_SIZE, /**< Use box centers and size */
    CORNER_SIZE, /**< Use box corners and size */
    TF_CENTER    /**< Use box centers and size but flip x and y co-ordinates */
};

/** Detection Output layer info */
class DetectionOutputLayerInfo final
{
public:
    /** Default Constructor */
    DetectionOutputLayerInfo()
        : _num_classes(),
          _share_location(),
          _code_type(DetectionOutputLayerCodeType::CORNER),
          _keep_top_k(),
          _nms_threshold(),
          _top_k(),
          _background_label_id(),
          _confidence_threshold(),
          _variance_encoded_in_target(false),
          _eta(),
          _num_loc_classes()
    {
        _num_loc_classes = _share_location ? 1 : _num_classes;
    }
    /** Constructor
     *
     * @param[in] num_classes                Number of classes to be predicted.
     * @param[in] share_location             If true, bounding boxes are shared among different classes.
     * @param[in] code_type                  Type of coding method for bbox.
     * @param[in] keep_top_k                 Number of total bounding boxes to be kept per image after NMS step.
     * @param[in] nms_threshold              Threshold to be used in NMS.
     * @param[in] top_k                      (Optional) Number of boxes per image with top confidence scores that are fed into the NMS algorithm. Default set to -1.
     * @param[in] background_label_id        (Optional) Background label ID. If there is no background class, set it as -1.
     * @param[in] confidence_threshold       (Optional) Only consider detections whose confidences are larger than a threshold. Default set to -FLT_MAX.
     * @param[in] variance_encoded_in_target (Optional) If true, variance is encoded in target. Otherwise we need to adjust the predicted offset accordingly. Default set to false.
     * @param[in] eta                        (Optional) Eta.
     */
    DetectionOutputLayerInfo(int num_classes, bool share_location, DetectionOutputLayerCodeType code_type, int keep_top_k, float nms_threshold, int top_k = -1, int background_label_id = -1,
                             float confidence_threshold = std::numeric_limits<float>::lowest(), bool variance_encoded_in_target = false, float eta = 1)
        : _num_classes(num_classes),
          _share_location(share_location),
          _code_type(code_type),
          _keep_top_k(keep_top_k),
          _nms_threshold(nms_threshold),
          _top_k(top_k),
          _background_label_id(background_label_id),
          _confidence_threshold(confidence_threshold),
          _variance_encoded_in_target(variance_encoded_in_target),
          _eta(eta),
          _num_loc_classes()
    {
        _num_loc_classes = _share_location ? 1 : _num_classes;
    }
    /** Get num classes. */
    int num_classes() const
    {
        return _num_classes;
    }
    /** Get share location. */
    bool share_location() const
    {
        return _share_location;
    }
    /** Get detection output code type. */
    DetectionOutputLayerCodeType code_type() const
    {
        return _code_type;
    }
    /** Get if variance encoded in target. */
    bool variance_encoded_in_target() const
    {
        return _variance_encoded_in_target;
    }
    /** Get the number of total bounding boxes to be kept per image. */
    int keep_top_k() const
    {
        return _keep_top_k;
    }
    /** Get nms threshold. */
    float nms_threshold() const
    {
        return _nms_threshold;
    }
    /** Get eta. */
    float eta() const
    {
        return _eta;
    }
    /** Get background label ID. */
    int background_label_id() const
    {
        return _background_label_id;
    }
    /** Get confidence threshold. */
    float confidence_threshold() const
    {
        return _confidence_threshold;
    }
    /** Get top K. */
    int top_k() const
    {
        return _top_k;
    }
    /** Get number of location classes. */
    int num_loc_classes() const
    {
        return _num_loc_classes;
    }

private:
    int                          _num_classes;
    bool                         _share_location;
    DetectionOutputLayerCodeType _code_type;
    int                          _keep_top_k;
    float                        _nms_threshold;
    int                          _top_k;
    int                          _background_label_id;
    float                        _confidence_threshold;
    bool                         _variance_encoded_in_target;
    float                        _eta;
    int                          _num_loc_classes;
};
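
// Illustrative usage sketch (not part of the upstream header): an SSD-style
// configuration with 21 classes; because the boxes are shared across classes,
// num_loc_classes() collapses to 1.
//
//   DetectionOutputLayerInfo info(21, true, DetectionOutputLayerCodeType::CENTER_SIZE,
//                                 100 /* keep_top_k */, 0.45f /* nms_threshold */);
//   int loc_classes = info.num_loc_classes(); // 1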

/** Pooling Layer Information class */
class PoolingLayerInfo
{
public:
    /** Default Constructor */
    PoolingLayerInfo()
        : _pool_type(PoolingType::MAX), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              unsigned int  pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(Size2D(pool_size, pool_size)), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              Size2D        pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @note This constructor is used for global pooling
     *
     * @param[in] pool_type Pooling type @ref PoolingType.
     */
    explicit PoolingLayerInfo(PoolingType pool_type)
        : _pool_type(pool_type), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
    {
    }
    /** Get the pooling type */
    PoolingType pool_type() const
    {
        return _pool_type;
    }
    /** Get the pooling size */
    const Size2D &pool_size() const
    {
        return _pool_size;
    }
    /** Get the padding and stride */
    PadStrideInfo pad_stride_info() const
    {
        return _pad_stride_info;
    }
    /** Check if padding is excluded in calculations */
    bool exclude_padding() const
    {
        return _exclude_padding;
    }
    /** Check if is global pooling */
    bool is_global_pooling() const
    {
        return _is_global_pooling;
    }

private:
    PoolingType   _pool_type;
    Size2D        _pool_size;
    PadStrideInfo _pad_stride_info;
    bool          _exclude_padding;
    bool          _is_global_pooling;
};
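
// Illustrative usage sketch (not part of the upstream header): a 2x2 max pooling
// with stride 2, and a global average pooling where the pool size is taken from
// the input tensor at configure time.
//
//   PoolingLayerInfo max_pool(PoolingType::MAX, Size2D(2, 2), PadStrideInfo(2, 2, 0, 0));
//   PoolingLayerInfo global_avg(PoolingType::AVG); // global_avg.is_global_pooling() == true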

/** ROI Pooling Layer Information class */
class ROIPoolingLayerInfo final
{
public:
    /** Constructor
     *
     * @param[in] pooled_width   Pooled width of the layer.
     * @param[in] pooled_height  Pooled height of the layer.
     * @param[in] spatial_scale  Spatial scale to be applied to the ROI coordinates and dimensions.
     * @param[in] sampling_ratio Number of samples to include in each pooling region (if set to zero, ceil(roi_dims/pooling_dims) samples are used).
     */
    ROIPoolingLayerInfo(unsigned int pooled_width, unsigned int pooled_height, float spatial_scale, unsigned int sampling_ratio = 0)
        : _pooled_width(pooled_width), _pooled_height(pooled_height), _spatial_scale(spatial_scale), _sampling_ratio(sampling_ratio)
    {
    }
    /** Get the pooled width of the layer */
    unsigned int pooled_width() const
    {
        return _pooled_width;
    }
    /** Get the pooled height of the layer */
    unsigned int pooled_height() const
    {
        return _pooled_height;
    }
    /** Get the spatial scale */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get sampling ratio */
    unsigned int sampling_ratio() const
    {
        return _sampling_ratio;
    }

private:
    unsigned int _pooled_width;
    unsigned int _pooled_height;
    float        _spatial_scale;
    unsigned int _sampling_ratio;
};

/** Generate Proposals Information class */
class GenerateProposalsInfo
{
public:
    /** Constructor
     *
     * @param[in] im_width       Width of the original image
     * @param[in] im_height      Height of the original image
     * @param[in] im_scale       Scale applied to the original image
     * @param[in] spatial_scale  (Optional) Scale applied to the feature map. Defaults to 1.0
     * @param[in] pre_nms_topN   (Optional) Number of the best scores to be selected from the transformations. Defaults to 6000.
     * @param[in] post_nms_topN  (Optional) Number of the best scores to be selected from the NMS operation. Defaults to 300.
     * @param[in] nms_thres      (Optional) NMS overlap threshold. Defaults to 0.7.
     * @param[in] min_size       (Optional) Size used to validate the anchors produced. Defaults to 16.
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region of interest). Defaults to 4.
     */
    GenerateProposalsInfo(float im_width, float im_height, float im_scale, float spatial_scale = 1.0, int pre_nms_topN = 6000, int post_nms_topN = 300, float nms_thres = 0.7, float min_size = 16.0,
                          size_t values_per_roi = 4)
        : _im_height(im_height), _im_width(im_width), _im_scale(im_scale), _spatial_scale(spatial_scale), _pre_nms_topN(pre_nms_topN), _post_nms_topN(post_nms_topN), _nms_thres(nms_thres),
          _min_size(min_size), _values_per_roi(values_per_roi)
    {
    }

    /* Get the original height */
    float im_height() const
    {
        return _im_height;
    }
    /* Get the original width */
    float im_width() const
    {
        return _im_width;
    }
    /* Get the image scale */
    float im_scale() const
    {
        return _im_scale;
    }
    /* Get the value of how many best scores to select (before NMS) */
    int pre_nms_topN() const
    {
        return _pre_nms_topN;
    }
    /* Get the value of how many best scores to select (after NMS) */
    int post_nms_topN() const
    {
        return _post_nms_topN;
    }
    /* Get the NMS overlap threshold */
    float nms_thres() const
    {
        return _nms_thres;
    }
    /* Get the minimal size */
    float min_size() const
    {
        return _min_size;
    }
    /* Get the spatial scale to be applied to the feature maps */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /* Get the values used to represent a ROI (Region of interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _im_height;
    float  _im_width;
    float  _im_scale;
    float  _spatial_scale;
    int    _pre_nms_topN;
    int    _post_nms_topN;
    float  _nms_thres;
    float  _min_size;
    size_t _values_per_roi;
};

/** ComputeAnchors information class */
class ComputeAnchorsInfo
{
public:
    /** Constructor
     *
     * @param[in] feat_width     Feature map width
     * @param[in] feat_height    Feature map height
     * @param[in] spatial_scale  Feature map scale
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region Of Interest). Defaults to 4
     */
    ComputeAnchorsInfo(float feat_width, float feat_height, float spatial_scale, size_t values_per_roi = 4)
        : _feat_height(feat_height),
          _feat_width(feat_width),
          _spatial_scale(spatial_scale),
          _values_per_roi(values_per_roi)
    {
    }

    /* Get the height of the feature map */
    float feat_height() const
    {
        return _feat_height;
    }

    /* Get the width of the feature map */
    float feat_width() const
    {
        return _feat_width;
    }

    /* Get the scale of the feature map */
    float spatial_scale() const
    {
        return _spatial_scale;
    }

    /* Get the values used to represent a ROI (Region Of Interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _feat_height;
    float  _feat_width;
    float  _spatial_scale;
    size_t _values_per_roi;
};

/** Bounding Box Transform information class */
class BoundingBoxTransformInfo final
{
public:
    /** Constructor
     *
     * @param[in] img_width                Width of the original image
     * @param[in] img_height               Height of the original image
     * @param[in] scale                    Scale of the original image
     * @param[in] apply_scale              (Optional) Re-apply scaling after transforming the boxes. Defaults to false
     * @param[in] weights                  (Optional) Weights [wx, wy, ww, wh] for the deltas. Defaults to all ones
     * @param[in] correct_transform_coords (Optional) Correct bounding box transform coordinates. Defaults to false
     * @param[in] bbox_xform_clip          (Optional) Minimum bounding box width and height after bounding box transformation in log-space. Defaults to log(1000/16)
     */
    BoundingBoxTransformInfo(float img_width, float img_height, float scale, bool apply_scale = false, const std::array<float, 4> weights = { { 1.f, 1.f, 1.f, 1.f } },
                             bool correct_transform_coords = false, float bbox_xform_clip = 4.135166556742356f)
        : _img_width(img_width), _img_height(img_height), _scale(scale), _apply_scale(apply_scale), _correct_transform_coords(correct_transform_coords), _weights(weights), _bbox_xform_clip(bbox_xform_clip)
    {
    }

    std::array<float, 4> weights() const
    {
        return _weights;
    }

    float bbox_xform_clip() const
    {
        return _bbox_xform_clip;
    }

    float img_height() const
    {
        return _img_height;
    }

    float img_width() const
    {
        return _img_width;
    }

    float scale() const
    {
        return _scale;
    }

    bool apply_scale() const
    {
        return _apply_scale;
    }

    bool correct_transform_coords() const
    {
        return _correct_transform_coords;
    }

private:
    float _img_width;
    float _img_height;
    float _scale;
    bool  _apply_scale;
    bool  _correct_transform_coords;
    std::array<float, 4> _weights;
    float _bbox_xform_clip;
};
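
// Illustrative usage sketch (not part of the upstream header): transform info for a
// 640x480 image at scale 1; the deltas keep the default unit weights and the default
// clip value log(1000/16).
//
//   BoundingBoxTransformInfo bbox_info(640.f, 480.f, 1.f);
//   std::array<float, 4> w = bbox_info.weights(); // { 1, 1, 1, 1 }
//   float clip = bbox_info.bbox_xform_clip();     // ~4.1352 = log(1000 / 16)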
1453
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001454/** Activation Layer Information class */
1455class ActivationLayerInfo
1456{
1457public:
1458 /** Available activation functions */
1459 enum class ActivationFunction
1460 {
Georgios Pinitas64ebe5b2017-09-01 17:44:24 +01001461 LOGISTIC, /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
1462 TANH, /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
1463 RELU, /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
1464 BOUNDED_RELU, /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
1465 LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
Manuel Bottini581c8982019-02-07 10:31:57 +00001466 LEAKY_RELU, /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} \alpha x & \quad \text{if } x \text{ < 0}\\ x & \quad \text{if } x \geq \text{ 0 } \end{cases} \f$ ) */
Georgios Pinitas64ebe5b2017-09-01 17:44:24 +01001467 SOFT_RELU, /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
1468 ABS, /**< Absolute ( \f$ f(x)= |x| \f$ ) */
1469 SQUARE, /**< Square ( \f$ f(x)= x^2 \f$ )*/
1470 SQRT, /**< Square root ( \f$ f(x) = \sqrt{x} \f$ )*/
1471 LINEAR /**< Linear ( \f$ f(x)= ax + b \f$ ) */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001472 };
1473
Giorgio Arena11674872018-02-07 15:38:12 +00001474 ActivationLayerInfo() = default;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001475 /** Constructor
1476 *
1477 * @param[in] f The activation function to use.
1478 * @param[in] a (Optional) The alpha parameter used by some activation functions
Georgios Pinitas64ebe5b2017-09-01 17:44:24 +01001479 * (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
1480 * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001481 */
1482 ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
Giorgio Arena11674872018-02-07 15:38:12 +00001483 : _act(f), _a(a), _b(b), _enabled(true)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001484 {
1485 }
Alex Gildayc357c472018-03-21 13:54:09 +00001486 /** Get the type of activation function */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001487 ActivationFunction activation() const
1488 {
1489 return _act;
1490 }
Alex Gildayc357c472018-03-21 13:54:09 +00001491 /** Get the alpha value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001492 float a() const
1493 {
1494 return _a;
1495 }
Alex Gildayc357c472018-03-21 13:54:09 +00001496 /** Get the beta value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001497 float b() const
1498 {
1499 return _b;
1500 }
Alex Gildayc357c472018-03-21 13:54:09 +00001501 /** Check if initialised */
Giorgio Arena11674872018-02-07 15:38:12 +00001502 bool enabled() const
1503 {
1504 return _enabled;
1505 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001506
1507private:
Giorgio Arena11674872018-02-07 15:38:12 +00001508 ActivationFunction _act = { ActivationLayerInfo::ActivationFunction::LOGISTIC };
1509 float _a = {};
1510 float _b = {};
1511 bool _enabled = { false };
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001512};
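/** Usage sketch (illustrative only, not part of the original header): a bounded ReLU activation
 * capped at 6; the cap value is an arbitrary example and the alpha parameter carries the bound.
 *
 * @code
 * ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f);
 * if(act_info.enabled())
 * {
 *     const float upper_bound = act_info.a(); // 6.0f, used as the upper clamp by BOUNDED_RELU
 * }
 * @endcode
 */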
1513
1514/** Normalization Layer Information class */
1515class NormalizationLayerInfo
1516{
1517public:
1518 /** Default Constructor
1519 *
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001520 * @param[in] type The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NormType::CROSS_MAP
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001521 * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5.
Georgios Pinitas41caa622017-11-16 14:37:08 +00001522 * @param[in] alpha (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001.
1523 * @param[in] beta (Optional) Beta parameter used by normalization equation. Defaults to 0.5.
 1524 * @param[in] kappa     (Optional) Kappa parameter used by [Krizhevsky 2012] Across Channel Local Brightness Normalization equation.
 1525 * @param[in] is_scaled (Optional) Boolean that specifies if alpha will be scaled by the normalization size or not.
 1526 * Should be false to follow [Krizhevsky 2012].
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001527 */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001528 NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true)
1529 : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001530 {
1531 }
Alex Gildayc357c472018-03-21 13:54:09 +00001532 /** Get the normalization type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001533 NormType type() const
1534 {
1535 return _type;
1536 }
Alex Gildayc357c472018-03-21 13:54:09 +00001537 /** Get the normalization size */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001538 uint32_t norm_size() const
1539 {
1540 return _norm_size;
1541 }
Alex Gildayc357c472018-03-21 13:54:09 +00001542 /** Get the alpha value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001543 float alpha() const
1544 {
1545 return _alpha;
1546 }
Alex Gildayc357c472018-03-21 13:54:09 +00001547 /** Get the beta value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001548 float beta() const
1549 {
1550 return _beta;
1551 }
Alex Gildayc357c472018-03-21 13:54:09 +00001552 /** Get the kappa value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001553 float kappa() const
1554 {
1555 return _kappa;
1556 }
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001557 /** Get the is_scaled value */
1558 bool is_scaled() const
1559 {
1560 return _is_scaled;
1561 }
Alex Gildayc357c472018-03-21 13:54:09 +00001562 /** Check if normalization is cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001563 bool is_cross_map() const
1564 {
1565 return _type == NormType::CROSS_MAP;
1566 }
Alex Gildayc357c472018-03-21 13:54:09 +00001567 /** Check if normalization is not cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001568 bool is_in_map() const
1569 {
1570 return !is_cross_map();
1571 }
1572 /** Return the scaling factor of the normalization function.
1573 *
 1574 * If is_scaled is set to false, the [Krizhevsky 2012] scaling is followed and alpha is returned unmodified;
 1575 * otherwise alpha is divided by the total number of elements used for the normalization.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001576 *
1577 * @return The normalization scaling factor.
1578 */
1579 float scale_coeff() const
1580 {
1581 const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001582 return (_is_scaled) ? (_alpha / size) : _alpha;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001583 }
1584
1585private:
1586 NormType _type;
1587 uint32_t _norm_size;
1588 float _alpha;
1589 float _beta;
1590 float _kappa;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001591 bool _is_scaled;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001592};
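/** Usage sketch (illustrative only, not part of the original header): for an IN_MAP_2D
 * normalization with norm_size = 5 and the default alpha = 0.0001, is_scaled = true means
 * scale_coeff() returns alpha / (5 * 5). The values are an arbitrary example.
 *
 * @code
 * NormalizationLayerInfo norm_info(NormType::IN_MAP_2D, 5);
 * const float coeff = norm_info.scale_coeff(); // 0.0001f / 25 = 4e-6f
 * @endcode
 */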
1593
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001594/** Convolution Layer Weights Information class. This class stores the necessary information to compute the convolution layer when the weights are already reshaped */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001595class WeightsInfo
1596{
1597public:
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001598 /** Default constructor */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001599 WeightsInfo()
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001600 : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001601 {
1602 }
1603 /** Constructor
1604 *
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001605 * @param[in] are_reshaped True if the weights have been reshaped
1606 * @param[in] kernel_width Kernel width.
1607 * @param[in] kernel_height Kernel height.
1608 * @param[in] num_kernels Number of convolution kernels.
1609 * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001610 */
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001611 WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
1612 : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001613 {
1614 }
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001615 /** Flag which specifies if the weights tensor has been reshaped.
1616 *
1617 * @return True if the weights tensors has been reshaped
1618 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001619 bool are_reshaped() const
1620 {
1621 return _are_reshaped;
1622 };
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001623 /** Return the number of convolution kernels
1624 *
1625 * @return The number of convolution kernels
1626 */
1627 unsigned int num_kernels() const
1628 {
1629 return _num_kernels;
1630 };
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001631 /** Return the width and height of the kernel
1632 *
1633 * @return The width and height of the kernel
1634 */
1635 std::pair<unsigned int, unsigned int> kernel_size() const
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001636 {
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001637 return std::make_pair(_kernel_width, _kernel_height);
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001638 }
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001639 /** Flag which specifies if the internal reshaped weights have to be retained. Used for reconfiguration purposes.
 *
 * @return True if the internal reshaped weights must be retained
 */
 bool retain_internal_weights() const
1640 {
1641 return _retain_internal_weights;
1642 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001643
1644private:
1645 const bool _are_reshaped;
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001646 const unsigned int _kernel_width;
1647 const unsigned int _kernel_height;
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001648 const unsigned int _num_kernels;
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001649 const bool _retain_internal_weights;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001650};
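/** Usage sketch (illustrative only, not part of the original header): describing 3x3 weights for
 * 64 convolution kernels that have already been reshaped. The sizes are arbitrary example numbers.
 *
 * @code
 * WeightsInfo weights_info(true, 3U, 3U, 64U);
 * const auto kernel_wh = weights_info.kernel_size(); // (3, 3)
 * @endcode
 */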
1651
Gian Marco36a0a462018-01-12 10:21:40 +00001652/** GEMM reshape information class. This class stores the necessary information about the reshaping of matrix A and matrix B.
 1653 *
 1654 * The matrix A can only be reshaped through @ref CLGEMMInterleave4x4Kernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
 1655 * Note: mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block, can optionally be set only for @ref CLGEMMInterleave4x4Kernel
 1656 *
giuros018b6b4a92018-12-18 19:01:33 +00001657 * The matrix B can only be reshaped through @ref CLGEMMReshapeRHSMatrixKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
 1658 * Note: mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block, can optionally be set only for @ref CLGEMMReshapeRHSMatrixKernel
Gian Marco36a0a462018-01-12 10:21:40 +00001659 *
1660 */
1661class GEMMReshapeInfo final
1662{
1663public:
1664 /** Default constructor */
1665 GEMMReshapeInfo()
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001666 : _m(1), _n(1), _k(1), _mult_transpose1xW_width(1), _mult_interleave4x4_height(1), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false)
Gian Marco36a0a462018-01-12 10:21:40 +00001667 {
1668 }
1669 /** Constructor
1670 *
1671 * @param[in] m Number of matrix A rows
1672 * @param[in] n Number of matrix B columns
1673 * @param[in] k Number of matrix A columns or matrix B rows
1674 * @param[in] mult_transpose1xW_width (Optional) Multiplication factor for the width of the 1xW transposed block
1675 * @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleaved block
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001676 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel.
1677 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001678 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1679 * to perform 1x1 convolutions with the NHWC data layout)
Gian Marco36a0a462018-01-12 10:21:40 +00001680 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001681 GEMMReshapeInfo(int m, int n, int k, int mult_transpose1xW_width = 1, int mult_interleave4x4_height = 1, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001682 : _m(m), _n(n), _k(k), _mult_transpose1xW_width(mult_transpose1xW_width), _mult_interleave4x4_height(mult_interleave4x4_height), _depth_output_gemm3d(depth_output_gemm3d),
1683 _reinterpret_input_as_3d(reinterpret_input_as_3d)
Gian Marco36a0a462018-01-12 10:21:40 +00001684 {
1685 }
1686 /** Number of matrix A rows
1687 *
1688 * @return the number of matrix A rows
1689 */
1690 int m() const
1691 {
1692 return _m;
1693 }
1694 /** Number of matrix B columns
1695 *
1696 * @return the number of matrix B columns
1697 */
1698 int n() const
1699 {
1700 return _n;
1701 }
1702 /** Number of matrix A columns or matrix B rows
1703 *
1704 * @return the number of matrix A columns or matrix B rows
1705 */
1706 int k() const
1707 {
1708 return _k;
1709 }
1710 /** Multiplication factor for the width of the 1xW transposed block
1711 *
1712 * @return the multiplication factor for the width of the 1xW transposed block
1713 */
1714 int mult_transpose1xW_width() const
1715 {
1716 return _mult_transpose1xW_width;
1717 }
1718 /** Multiplication factor for the height of the 4x4 interleaved block
1719 *
1720 * @return the multiplication factor for the height of the 4x4 interleaved block
1721 */
1722 int mult_interleave4x4_height() const
1723 {
1724 return _mult_interleave4x4_height;
1725 }
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001726 /** Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
1727 *
 1728 * @note GEMM3D kernel is used when the output has to be reinterpreted as a 3D tensor. In that case:
1729 * m = depth_output_gemm3d * output_height
1730 *
1731 * @return the depth of the output tensor to be used with the GEMM3D kernel
1732 */
1733 int depth_output_gemm3d() const
1734 {
1735 return _depth_output_gemm3d;
1736 }
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001737 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1738 *
1739 * @return True if the input tensor has to be reinterpreted as 3D tensor
1740 */
1741 bool reinterpret_input_as_3d() const
1742 {
1743 return _reinterpret_input_as_3d;
1744 };
Gian Marco36a0a462018-01-12 10:21:40 +00001745
1746private:
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001747 const int _m;
1748 const int _n;
1749 const int _k;
1750 const int _mult_transpose1xW_width;
1751 const int _mult_interleave4x4_height;
1752 const int _depth_output_gemm3d;
1753 const bool _reinterpret_input_as_3d;
Gian Marco36a0a462018-01-12 10:21:40 +00001754};
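/** Usage sketch (illustrative only, not part of the original header): describing a GEMM where
 * matrix A is MxK = 24x8 and matrix B is KxN = 8x16, with the default reshape multipliers.
 * The sizes are arbitrary example numbers.
 *
 * @code
 * GEMMReshapeInfo reshape_info(24, 16, 8); // m = 24, n = 16, k = 8
 * const int m = reshape_info.m();          // 24
 * @endcode
 */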
1755
giuros016d109962019-01-07 17:47:19 +00001756/** Depthwise convolution reshape information */
struct DepthwiseConvolutionReshapeInfo
1757{
1758 unsigned int c0{ 1 }; /**< Number of channels processed by the depth-wise convolution */
1759 bool transpose{ false }; /**< True if the block MxC0 (where M is the area of the filter i.e. KwxKh) has to be transposed */
1760};
1761
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001762/** GEMMLowp output stage type */
1763enum class GEMMLowpOutputStageType
1764{
1765 NONE, /**< No quantization to uint8 */
1766 QUANTIZE_DOWN, /**< Quantize to uint8 using an integer multiplication */
1767 QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
1768 QUANTIZE_DOWN_FLOAT /**< Quantize to uint8 using a floating point multiplication */
1769};
1770
1771/** GEMMLowp output stage info */
1772struct GEMMLowpOutputStageInfo
1773{
1774 GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
1775 int gemmlowp_offset{ 0 }; /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
1776 int gemmlowp_multiplier{ 0 }; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
1777 int gemmlowp_shift{ 0 }; /**< GEMMLowp output stage shift used for quantizing to uint8 */
1778 int gemmlowp_min_bound{ 0 }; /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
1779 int gemmlowp_max_bound{ 0 }; /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
1780};
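/** Usage sketch (illustrative only, not part of the original header): a fixed-point quantize-down
 * output stage. The offset, multiplier, shift and bound values are placeholders chosen for the
 * example, not values computed by the library.
 *
 * @code
 * GEMMLowpOutputStageInfo output_stage{};
 * output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
 * output_stage.gemmlowp_offset     = 2;          // example output offset
 * output_stage.gemmlowp_multiplier = 1073741824; // example fixed-point multiplier
 * output_stage.gemmlowp_shift      = 8;          // example right shift
 * output_stage.gemmlowp_min_bound  = 0;
 * output_stage.gemmlowp_max_bound  = 255;
 * @endcode
 */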
1781
Gian Marco Iodice5ba5e092018-12-06 17:13:09 +00001782/** GEMM LHS (Left Hand Side) matrix information */
1783struct GEMMLHSMatrixInfo
1784{
1785 unsigned int m0{ 1 }; /**< Number of rows processed by the matrix multiplication */
1786 unsigned int k0{ 1 }; /**< Number of partial accumulations performed by the matrix multiplication */
1787 unsigned int v0{ 1 }; /**< Number of vertical blocks of size (m0xk0) stored on the same output row */
 1788 bool transpose{ true }; /**< True if the (m0xk0) block has to be transposed before being stored */
1789 bool interleave{ true }; /**< True if the v0 (m0xk0) blocks have to be interleaved in the output row */
1790};
1791
Gian Marco Iodice3b0a2652018-12-07 11:18:09 +00001792/** GEMM RHS (Right Hand Side) matrix information */
1793struct GEMMRHSMatrixInfo
1794{
1795 unsigned int n0{ 1 }; /**< Number of columns processed by the matrix multiplication */
1796 unsigned int k0{ 1 }; /**< Number of partial accumulations performed by the matrix multiplication */
1797 unsigned int h0{ 1 }; /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */
 1798 bool transpose{ true }; /**< True if the (k0xn0) block has to be transposed before being stored */
1799 bool interleave{ true }; /**< True if the h0 (k0xn0) blocks have to be interleaved in the output row */
1800};
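/** Usage sketch (illustrative only, not part of the original header): a blocking configuration for
 * the reshaped LHS/RHS matrices. The m0/n0/k0/v0/h0 values are placeholders chosen for the example;
 * k0 is kept equal on both sides so the two block shapes match.
 *
 * @code
 * GEMMLHSMatrixInfo lhs_info{};
 * lhs_info.m0 = 4; // rows per block
 * lhs_info.k0 = 4; // accumulations per block
 * lhs_info.v0 = 2; // vertical blocks per output row
 *
 * GEMMRHSMatrixInfo rhs_info{};
 * rhs_info.n0 = 4; // columns per block
 * rhs_info.k0 = 4; // kept equal to lhs_info.k0 in this example
 * rhs_info.h0 = 2; // horizontal blocks per output row
 * @endcode
 */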
1801
Gian Marco36a0a462018-01-12 10:21:40 +00001802/** GEMM information class. This class stores the necessary information to compute GEMM functions
1803 *
1804 * This object also contains the information about how matrix A and matrix B have been reshaped
1805 *
1806 */
Chunosov5124be52017-11-22 20:42:13 +07001807class GEMMInfo
1808{
1809public:
1810 /** Default constructor */
1811 GEMMInfo()
Anthony Barbier08a45172018-11-30 17:20:26 +00001812 : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(true), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false), _retain_internal_weights(false), _gemmlowp_output_stage(),
1813 _fp_mixed_precision(false)
Chunosov5124be52017-11-22 20:42:13 +07001814 {
1815 }
1816 /** Constructor
1817 *
1818 * @param[in] is_a_reshaped True if the matrix A has been reshaped
1819 * @param[in] is_b_reshaped True if the matrix B has been reshaped
1820 * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001821 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001822 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001823 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1824 * to perform 1x1 convolutions with the NHWC data layout)
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001825 * @param[in] retain_internal_weights (Optional) Retain the weights tensor from previous run
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001826 * @param[in] gemmlowp_output_stage (Optional) GEMMLowp Output stage info
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001827 * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001828 *
Chunosov5124be52017-11-22 20:42:13 +07001829 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001830 GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001831 GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001832 : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run), _depth_output_gemm3d(depth_output_gemm3d),
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001833 _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights), _gemmlowp_output_stage(gemmlowp_output_stage), _fp_mixed_precision(fp_mixed_precision)
Chunosov5124be52017-11-22 20:42:13 +07001834 {
1835 }
1836 /** Flag which specifies if the matrix A has been reshaped
1837 *
1838 * @return True if the matrix A has been reshaped
1839 */
1840 bool is_a_reshaped() const
1841 {
1842 return _is_a_reshaped;
1843 };
1844 /** Flag which specifies if the matrix B has been reshaped
1845 *
1846 * @return True if the matrix B has been reshaped
1847 */
1848 bool is_b_reshaped() const
1849 {
1850 return _is_b_reshaped;
1851 };
 1852 /** Flag which specifies if the reshape of matrix B should be executed only for the first run
 1853 *
 1854 * @note This flag could be set to TRUE when GEMM is used to accelerate the convolution layer
 1855 *
 1856 * @return True if the reshape of matrix B happens only for the first run
1857 */
1858 bool reshape_b_only_on_first_run() const
1859 {
1860 return _reshape_b_only_on_first_run;
1861 };
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001862 /** Depth of the output when the GEMM output is reinterpreted as a 3D tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001863 *
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001864 * @return the depth of the output tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001865 */
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001866 int depth_output_gemm3d() const
Gian Marco36a0a462018-01-12 10:21:40 +00001867 {
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001868 return _depth_output_gemm3d;
1869 };
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001870 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1871 *
1872 * @return True if the input tensor has to be reinterpreted as 3D tensor
1873 */
1874 bool reinterpret_input_as_3d() const
1875 {
1876 return _reinterpret_input_as_3d;
1877 };
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001878 /** Flag which specifies if the weights tensor has to be retained from previous run
1879 *
1880 * @return True if the weights tensor has to be retained
1881 */
1882 bool retain_internal_weights() const
1883 {
1884 return _retain_internal_weights;
1885 };
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001886 /** GEMMLowp output stage
1887 *
1888 * @return the GEMMLowp output stage info
1889 */
1890 GEMMLowpOutputStageInfo gemmlowp_output_stage() const
1891 {
1892 return _gemmlowp_output_stage;
1893 };
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001894 /** Flag which specifies if a wider accumulator should be used.
1895 *
1896 * @return True if a wider accumulator has to be used
1897 */
1898 bool fp_mixed_precision() const
1899 {
1900 return _fp_mixed_precision;
1901 };
Chunosov5124be52017-11-22 20:42:13 +07001902
1903private:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001904 const bool _is_a_reshaped;
1905 const bool _is_b_reshaped;
1906 const bool _reshape_b_only_on_first_run;
1907 const int _depth_output_gemm3d;
1908 const bool _reinterpret_input_as_3d;
1909 const bool _retain_internal_weights;
1910 const GEMMLowpOutputStageInfo _gemmlowp_output_stage;
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001911 const bool _fp_mixed_precision;
Chunosov5124be52017-11-22 20:42:13 +07001912};
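/** Usage sketch (illustrative only, not part of the original header): a GEMM where neither input is
 * pre-reshaped and matrix B is reshaped once and reused across runs, as is typical when GEMM backs a
 * convolution layer. The flag values are an arbitrary example.
 *
 * @code
 * GEMMInfo gemm_info(false, false, true); // reshape B only on the first run
 * if(gemm_info.reshape_b_only_on_first_run())
 * {
 *     // the reshaped B can be cached between executions
 * }
 * @endcode
 */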
1913
Gian Marco Iodice247f52c2018-03-22 11:24:56 +00001914/** Winograd information */
1915struct WinogradInfo
1916{
 1917 /** Constructor
1918 *
1919 * @param[in] output_tile_sz Width and height of the output tile
1920 * @param[in] kernel_sz Width and height of the kernel
1921 * @param[in] input_dims Width and height of the input tensor before the convolution is applied
1922 * @param[in] conv_info Convolution info (Pads, strides)
1923 * @param[in] data_layout Data layout to use for the output tensor once the convolution has been applied
1924 */
1925 WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
1926 : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
1927 {
1928 }
1929
1930 Size2D output_tile_size{}; /**< Width and height of the output tile */
1931 Size2D kernel_size{}; /**< Width and height of the kernel*/
1932 Size2D input_dimensions{}; /**< Width and height of the input tensor before the convolution is applied */
1933 PadStrideInfo convolution_info{}; /**< Convolution info (Pads, strides,...) */
1934 DataLayout output_data_layout{ DataLayout::NCHW }; /**< Data layout to use for the output tensor once the convolution has been applied (NCHW or NHWC) */
1935};
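/** Usage sketch (illustrative only, not part of the original header): Winograd information for a
 * 2x2 output tile with a 3x3 kernel on a 224x224 input, unit stride and no padding. The sizes are
 * arbitrary example numbers; Size2D, PadStrideInfo and DataLayout come from the headers included above.
 *
 * @code
 * const WinogradInfo winograd_info(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(224U, 224U),
 *                                  PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW);
 * @endcode
 */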
1936
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001937/** IO formatting information class*/
1938struct IOFormatInfo
1939{
1940 /** Precision type used when printing floating point numbers */
1941 enum class PrecisionType
1942 {
 1943 Default, /**< Default to the precision that the current stream has */
1944 Custom, /**< Custom precision specified by the user using the precision parameter */
1945 Full /**< The maximum precision of the floating point representation */
1946 };
1947
1948 /** Specifies the area to be printed, used by Tensor objects */
1949 enum class PrintRegion
1950 {
1951 ValidRegion, /**< Prints the valid region of the Tensor object */
1952 NoPadding, /**< Prints the Tensor object without the padding */
1953 Full /**< Print the tensor object including padding */
1954 };
1955
Alex Gildayc357c472018-03-21 13:54:09 +00001956 /** Construct a set of IO formatting information.
1957 *
1958 * @param[in] print_region Area to be printed. Used by Tensor objects. Default: ValidRegion.
1959 * @param[in] precision_type Precision type for floating point numbers. Default: stream default.
 1960 * @param[in] precision      Precision value for floating point numbers. Default: 10.
1961 * @param[in] align_columns Whether to align columns when printed. Default: true.
 1962 * @param[in] element_delim  Delimiter between elements. Default: " ".
 1963 * @param[in] row_delim      Delimiter between rows. Default: "\n".
1964 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001965 IOFormatInfo(PrintRegion print_region = PrintRegion::ValidRegion,
1966 PrecisionType precision_type = PrecisionType::Default,
1967 unsigned int precision = 10,
1968 bool align_columns = true,
1969 std::string element_delim = " ",
1970 std::string row_delim = "\n")
1971 : print_region(print_region),
1972 precision_type(precision_type),
1973 precision(precision),
1974 element_delim(element_delim),
1975 row_delim(row_delim),
1976 align_columns(align_columns)
1977 {
1978 }
1979
Alex Gildayc357c472018-03-21 13:54:09 +00001980 /** Area to be printed by Tensor objects */
1981 PrintRegion print_region;
1982 /** Floating point precision type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001983 PrecisionType precision_type;
Alex Gildayc357c472018-03-21 13:54:09 +00001984 /** Floating point precision */
1985 unsigned int precision;
 1986 /** Element delimiter */
1987 std::string element_delim;
 1988 /** Row delimiter */
1989 std::string row_delim;
1990 /** Align columns */
1991 bool align_columns;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001992};
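/** Usage sketch (illustrative only, not part of the original header): formatting information for
 * printing the full tensor, padding included, with a custom precision of 4 digits. The values are
 * an arbitrary example.
 *
 * @code
 * IOFormatInfo format_info(IOFormatInfo::PrintRegion::Full, IOFormatInfo::PrecisionType::Custom, 4);
 * @endcode
 */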
Georgios Pinitasd8734b52017-12-22 15:27:52 +00001993} // namespace arm_compute
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001994#endif /* __ARM_COMPUTE_TYPES_H__ */