/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TYPES_H__
#define __ARM_COMPUTE_TYPES_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/QAsymm8.h"
#include "arm_compute/core/Rounding.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "support/Half.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

namespace arm_compute
{
/** 16-bit floating point type */
using half = half_float::half;

/** Permutation vector */
using PermutationVector = Strides;
/** Bidirectional strides */
using BiStrides = Coordinates;

/** Image colour formats */
enum class Format
{
    UNKNOWN,  /**< Unknown image format */
    U8,       /**< 1 channel, 1 U8 per channel */
    S16,      /**< 1 channel, 1 S16 per channel */
    U16,      /**< 1 channel, 1 U16 per channel */
    S32,      /**< 1 channel, 1 S32 per channel */
    U32,      /**< 1 channel, 1 U32 per channel */
    F16,      /**< 1 channel, 1 F16 per channel */
    F32,      /**< 1 channel, 1 F32 per channel */
    UV88,     /**< 2 channels, 1 U8 per channel */
    RGB888,   /**< 3 channels, 1 U8 per channel */
    RGBA8888, /**< 4 channels, 1 U8 per channel */
    YUV444,   /**< 3 planes of 8-bit 4:4:4 sampled Y, U, V planes */
    YUYV422,  /**< A single plane of 32-bit macro pixels of Y0, U0, Y1, V0 bytes */
    NV12,     /**< A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling */
    NV21,     /**< A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling */
    IYUV,     /**< 3 planes of 8-bit 4:2:0 sampled Y, U, V planes */
    UYVY422   /**< A single plane of 32-bit macro pixels of U0, Y0, V0, Y1 bytes */
};

/** Available data types */
enum class DataType
{
    UNKNOWN, /**< Unknown data type */
    U8,      /**< unsigned 8-bit number */
    S8,      /**< signed 8-bit number */
    QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number */
    U16,     /**< unsigned 16-bit number */
    S16,     /**< signed 16-bit number */
    U32,     /**< unsigned 32-bit number */
    S32,     /**< signed 32-bit number */
    U64,     /**< unsigned 64-bit number */
    S64,     /**< signed 64-bit number */
    F16,     /**< 16-bit floating-point number */
    F32,     /**< 32-bit floating-point number */
    F64,     /**< 64-bit floating-point number */
    SIZET    /**< size_t */
};

/** Available Sampling Policies */
enum class SamplingPolicy
{
    CENTER,  /**< Samples are taken at pixel center */
    TOP_LEFT /**< Samples are taken at pixel top left corner */
};

/** Constant value of the border pixels when using BorderMode::CONSTANT */
constexpr uint8_t CONSTANT_BORDER_VALUE = 199;

/** Constant value used to indicate a half-scale pyramid */
constexpr float SCALE_PYRAMID_HALF = 0.5f;

/** Constant value used to indicate an ORB scaled pyramid */
constexpr float SCALE_PYRAMID_ORB = 8.408964152537146130583778358414e-01;

/** Supported tensor data layouts */
enum class DataLayout
{
    UNKNOWN, /**< Unknown data layout */
    NCHW,    /**< Num samples, channels, height, width */
    NHWC     /**< Num samples, height, width, channels */
};

/** Supported tensor data layout dimensions */
enum class DataLayoutDimension
{
    CHANNEL, /**< channel */
    HEIGHT,  /**< height */
    WIDTH,   /**< width */
    BATCHES  /**< batches */
};

/** Quantization settings (used for QASYMM8 data type) */
struct QuantizationInfo
{
    /** Default constructor */
    QuantizationInfo() noexcept
        : scale(0.0f),
          offset(0)
    {
    }

    /** Construct quantization info.
     *
     * @param[in] scale  Scale.
     * @param[in] offset Offset.
     */
    QuantizationInfo(float scale, int offset)
        : scale(scale), offset(offset)
    {
    }

    /** Check whether equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is the same.
     */
    bool operator==(const QuantizationInfo &other) const
    {
        return scale == other.scale && offset == other.offset;
    }

    /** Check whether not equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is not the same.
     */
    bool operator!=(const QuantizationInfo &other) const
    {
        return !(*this == other);
    }

    float scale;  /**< scale */
    int   offset; /**< offset */

    /** Quantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value           Value to quantize.
     * @param[in] rounding_policy Policy to use when rounding.
     *
     * @return the quantized value.
     */
    qasymm8_t quantize(float value, RoundingPolicy rounding_policy) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::quantize: scale == 0");
        return sqcvt_qasymm8_f32(value, scale, offset, rounding_policy);
    }

    /** Dequantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value Value to dequantize.
     *
     * @return the original value before quantization.
     */
    float dequantize(qasymm8_t value) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::dequantize: scale == 0");
        return scvt_f32_qasymm8(value, scale, offset);
    }

    /** Indicates whether this QuantizationInfo has valid settings or not
     *
     * @return True if this quantization info is empty (i.e. has no valid settings).
     */
    bool empty() const
    {
        return scale == 0;
    }
};
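
/** Usage sketch for @ref QuantizationInfo (illustrative addition, not part of the original API docs).
 *  The scale/offset values below are hypothetical; RoundingPolicy comes from arm_compute/core/Rounding.h.
 *
 * @code
 * QuantizationInfo qinfo(1.f / 255.f, 128);                                // scale and zero-point offset
 * qasymm8_t        q = qinfo.quantize(0.f, RoundingPolicy::TO_NEAREST_UP); // -> 128
 * float            f = qinfo.dequantize(q);                                // -> 0.f
 * @endcode
 */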

/** Container for valid region of a window */
struct ValidRegion
{
    /** Default constructor */
    ValidRegion()
        : anchor{}, shape{}
    {
    }

    /** Allow instances of this class to be copy constructed */
    ValidRegion(const ValidRegion &) = default;
    /** Allow instances of this class to be move constructed */
    ValidRegion(ValidRegion &&) = default;
    /** Allow instances of this class to be copied */
    ValidRegion &operator=(const ValidRegion &) = default;
    /** Allow instances of this class to be moved */
    ValidRegion &operator=(ValidRegion &&) = default;
    /** Default destructor */
    ~ValidRegion() = default;

    /** Constructor for a valid region with default number of dimensions
     *
     * @param[in] an_anchor Anchor for the start of the valid region.
     * @param[in] a_shape   Shape of the valid region.
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
    }

    /** Constructor for a valid region with specified number of dimensions
     *
     * @param[in] an_anchor      Anchor for the start of the valid region.
     * @param[in] a_shape        Shape of the valid region.
     * @param[in] num_dimensions Number of dimensions (must be >= number of dimensions of anchor and shape).
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
        anchor.set_num_dimensions(num_dimensions);
    }

    /** Return the start of the valid region for the given dimension @p d */
    int start(unsigned int d) const
    {
        return anchor[d];
    }

    /** Return the end of the valid region for the given dimension @p d */
    int end(unsigned int d) const
    {
        return anchor[d] + shape[d];
    }

    /** Accessor to set the value of anchor and shape for one of the dimensions.
     *
     * @param[in] dimension Dimension for which the value is set.
     * @param[in] start     Value to be set in anchor for the dimension.
     * @param[in] size      Value to be set in shape for the dimension.
     *
     * @return *this.
     */
    ValidRegion &set(size_t dimension, int start, size_t size)
    {
        anchor.set(dimension, start);
        shape.set(dimension, size);
        return *this;
    }

    Coordinates anchor; /**< Anchor for the start of the valid region. */
    TensorShape shape;  /**< Shape of the valid region. */
};
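
/** Usage sketch for @ref ValidRegion (illustrative addition; the anchor and shape values are made up).
 *
 * @code
 * ValidRegion region(Coordinates(0, 2), TensorShape(16U, 12U));
 * int end_x   = region.end(0);   // 0 + 16 = 16
 * int start_y = region.start(1); // 2
 * region.set(1, 0, 14);          // dimension 1 now starts at 0 and spans 14 elements
 * @endcode
 */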

/** Methods available to handle borders */
enum class BorderMode
{
    UNDEFINED, /**< Borders are left undefined */
    CONSTANT,  /**< Pixels outside the image are assumed to have a constant value */
    REPLICATE  /**< Pixels outside the image are assumed to have the same value as the closest image pixel */
};

/** Container for 2D border size */
struct BorderSize
{
    /** Empty border, i.e. no border */
    constexpr BorderSize()
        : top{ 0 }, right{ 0 }, bottom{ 0 }, left{ 0 }
    {
    }

    /** Border with equal size around the 2D plane */
    explicit constexpr BorderSize(unsigned int size)
        : top{ size }, right{ size }, bottom{ size }, left{ size }
    {
    }

    /** Border with same size for top/bottom and left/right */
    constexpr BorderSize(unsigned int top_bottom, unsigned int left_right)
        : top{ top_bottom }, right{ left_right }, bottom{ top_bottom }, left{ left_right }
    {
    }

    /** Border with different sizes */
    constexpr BorderSize(unsigned int top, unsigned int right, unsigned int bottom, unsigned int left)
        : top{ top }, right{ right }, bottom{ bottom }, left{ left }
    {
    }

    /** Check if the entire border is zero */
    constexpr bool empty() const
    {
        return top == 0 && right == 0 && bottom == 0 && left == 0;
    }

    /** Check if the border is the same size on all sides */
    constexpr bool uniform() const
    {
        return top == right && top == bottom && top == left;
    }

    /** Scale this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return *this.
     */
    BorderSize &operator*=(float scale)
    {
        top *= scale;
        right *= scale;
        bottom *= scale;
        left *= scale;

        return *this;
    }

    /** Scale a copy of this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return a scaled copy of this.
     */
    BorderSize operator*(float scale)
    {
        BorderSize size = *this;
        size *= scale;

        return size;
    }

    /** Limit this border size.
     *
     * @param[in] limit Border size to limit this border size to.
     */
    void limit(const BorderSize &limit)
    {
        top    = std::min(top, limit.top);
        right  = std::min(right, limit.right);
        bottom = std::min(bottom, limit.bottom);
        left   = std::min(left, limit.left);
    }

    unsigned int top;    /**< top of the border */
    unsigned int right;  /**< right of the border */
    unsigned int bottom; /**< bottom of the border */
    unsigned int left;   /**< left of the border */
};
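
/** Usage sketch for @ref BorderSize (illustrative addition; the sizes are arbitrary).
 *
 * @code
 * BorderSize border(1U, 2U);         // top/bottom = 1, left/right = 2
 * BorderSize scaled = border * 2.f;  // top/bottom = 2, left/right = 4
 * scaled.limit(BorderSize(3U));      // clamp every side to at most 3
 * bool all_equal = scaled.uniform(); // false: top is 2 but left is 3
 * @endcode
 */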

/** Container for 2D padding size */
using PaddingSize = BorderSize;

/** Policy to handle overflow */
enum class ConvertPolicy
{
    WRAP,    /**< Wrap around */
    SATURATE /**< Saturate */
};

/** Interpolation method */
enum class InterpolationPolicy
{
    NEAREST_NEIGHBOR, /**< Output values are defined to match the source pixel whose center is nearest to the sample position */
    BILINEAR,         /**< Output values are defined by bilinear interpolation between the pixels */
    AREA,             /**< Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image */
};

/** Bilinear Interpolation method used by LKTracker */
enum class BilinearInterpolation
{
    BILINEAR_OLD_NEW, /**< Old-new method */
    BILINEAR_SCHARR   /**< Scharr method */
};

/** Threshold mode */
enum class ThresholdType
{
    BINARY, /**< Threshold with one value */
    RANGE   /**< Threshold with two values */
};

/** Termination criteria */
enum class Termination
{
    TERM_CRITERIA_EPSILON,    /**< Terminate when within epsilon of a threshold */
    TERM_CRITERIA_ITERATIONS, /**< Terminate after a maximum number of iterations */
    TERM_CRITERIA_BOTH        /**< Terminate on whichever of the other conditions occurs first */
};

/** Magnitude calculation type. */
enum class MagnitudeType
{
    L1NORM, /**< L1 normalization type */
    L2NORM  /**< L2 normalization type */
};

/** Phase calculation type.
 *
 * @note When PhaseType == SIGNED, each angle is mapped to the range 0 to 255 inclusive, otherwise angles are between 0 and 180.
 */
enum class PhaseType
{
    SIGNED,  /**< Angle range: [0, 360] */
    UNSIGNED /**< Angle range: [0, 180] */
};

/** Keypoint type */
struct KeyPoint
{
    int32_t x{ 0 };               /**< X coordinates */
    int32_t y{ 0 };               /**< Y coordinates */
    float   strength{ 0.f };      /**< Strength of the point */
    float   scale{ 0.f };         /**< Scale initialized to 0 by the corner detector */
    float   orientation{ 0.f };   /**< Orientation initialized to 0 by the corner detector */
    int32_t tracking_status{ 0 }; /**< Status initialized to 1 by the corner detector, set to 0 when the point is lost */
    float   error{ 0.f };         /**< Tracking error initialized to 0 by the corner detector */
};

/** Internal key point */
using InternalKeypoint = std::tuple<float, float, float>; /* x,y,strength */

/** Rectangle type */
struct Rectangle
{
    uint16_t x;      /**< Top-left x coordinate */
    uint16_t y;      /**< Top-left y coordinate */
    uint16_t width;  /**< Width of the rectangle */
    uint16_t height; /**< Height of the rectangle */
};

/** Coordinate type */
struct Coordinates2D
{
    int32_t x; /**< X coordinates */
    int32_t y; /**< Y coordinates */
};

/** Coordinate type */
struct Coordinates3D
{
    uint32_t x; /**< X coordinates */
    uint32_t y; /**< Y coordinates */
    uint32_t z; /**< Z coordinates */
};

/** Padding information as a pair of unsigned int start/end */
using PaddingInfo = std::pair<uint32_t, uint32_t>;

/** List of padding information */
using PaddingList = std::vector<PaddingInfo>;

/** Region of interest */
struct ROI
{
    Rectangle rect;      /**< Rectangle specifying the region of interest */
    uint16_t  batch_idx; /**< The batch index of the region of interest */
};

/** Available channels */
enum class Channel
{
    UNKNOWN, /**< Unknown channel format */
    C0,      /**< First channel (used by formats with unknown channel types). */
    C1,      /**< Second channel (used by formats with unknown channel types). */
    C2,      /**< Third channel (used by formats with unknown channel types). */
    C3,      /**< Fourth channel (used by formats with unknown channel types). */
    R,       /**< Red channel. */
    G,       /**< Green channel. */
    B,       /**< Blue channel. */
    A,       /**< Alpha channel. */
    Y,       /**< Luma channel. */
    U,       /**< Cb/U channel. */
    V        /**< Cr/V/Value channel. */
};

/** Available matrix patterns */
enum class MatrixPattern
{
    BOX,   /**< Box pattern matrix. */
    CROSS, /**< Cross pattern matrix. */
    DISK,  /**< Disk pattern matrix. */
    OTHER  /**< Any other matrix pattern. */
};

/** Available non linear functions. */
enum class NonLinearFilterFunction : unsigned
{
    MEDIAN = 0, /**< Non linear median filter. */
    MIN    = 1, /**< Non linear erode. */
    MAX    = 2, /**< Non linear dilate. */
};

/** Available reduction operations */
enum class ReductionOperation
{
    SUM_SQUARE, /**< Sum of squares */
    SUM,        /**< Sum */
    MEAN_SUM,   /**< Mean of sum */
};

/** The normalization type used for the normalization layer */
enum class NormType
{
    IN_MAP_1D, /**< Normalization applied within the same map in 1D region */
    IN_MAP_2D, /**< Normalization applied within the same map in 2D region */
    CROSS_MAP  /**< Normalization applied cross maps */
};

/** Normalization type for Histogram of Oriented Gradients (HOG) */
enum class HOGNormType
{
    L2_NORM    = 1, /**< L2-norm */
    L2HYS_NORM = 2, /**< L2-norm followed by clipping */
    L1_NORM    = 3  /**< L1 norm */
};

/** Detection window used for the object detection. The detection window keeps the following information:
 *
 * -# Geometry of the rectangular window (x/y of top-left corner and width/height)
 * -# Index of the class used for evaluating which class the detection window belongs to
 * -# Confidence value (score) obtained with the classifier
 */
struct DetectionWindow
{
    uint16_t x{ 0 };         /**< Top-left x coordinate */
    uint16_t y{ 0 };         /**< Top-left y coordinate */
    uint16_t width{ 0 };     /**< Width of the detection window */
    uint16_t height{ 0 };    /**< Height of the detection window */
    uint16_t idx_class{ 0 }; /**< Index of the class */
    float    score{ 0.f };   /**< Confidence value for the detection window */
};

/** Dimension rounding type when down-scaling on CNNs
 * @note Used in pooling and convolution layer
 */
enum class DimensionRoundingType
{
    FLOOR, /**< Floor rounding */
    CEIL   /**< Ceil rounding */
};

/** Available pooling types */
enum class PoolingType
{
    MAX, /**< Max Pooling */
    AVG, /**< Average Pooling */
    L2   /**< L2 Pooling */
};

/** Available non maxima suppression types */
enum class NMSType
{
    LINEAR,   /**< Linear NMS */
    GAUSSIAN, /**< Gaussian NMS */
    ORIGINAL  /**< Original NMS */
};

/** BoxWithNonMaximaSuppressionLimit Information class */
class BoxNMSLimitInfo final
{
public:
    /** Constructor
     *
     * @param[in] score_thresh             (Optional) Score threshold.
     * @param[in] nms                      (Optional) NMS value
     * @param[in] detections               (Optional) Number of detections
     * @param[in] soft_nms_enabled         (Optional) Enable SoftNMS
     * @param[in] soft_nms_method          (Optional) Soft NMS method
     * @param[in] soft_nms_sigma           (Optional) Soft NMS sigma value
     * @param[in] soft_nms_min_score_thres (Optional) Soft NMS minimum score threshold
     * @param[in] suppress_size            (Optional) Filter out boxes based on their size. Defaults to false
     * @param[in] min_size                 (Optional) Boxes smaller than min_size will be filtered out. Defaults to 1
     * @param[in] im_width                 (Optional) Boxes whose centers (on the x axis) are beyond im_width will be filtered. Defaults to 1
     * @param[in] im_height                (Optional) Boxes whose centers (on the y axis) are beyond im_height will be filtered. Defaults to 1
     */
    BoxNMSLimitInfo(float score_thresh = 0.05f, float nms = 0.3f,
                    int detections = 100, bool soft_nms_enabled = false,
                    NMSType soft_nms_method = NMSType::LINEAR,
                    float soft_nms_sigma = 0.5f, float soft_nms_min_score_thres = 0.001f, bool suppress_size = false, float min_size = 1.0f, float im_width = 1.0f, float im_height = 1.0f)
        : _score_thresh(score_thresh), _nms(nms), _detections_per_im(detections), _soft_nms_enabled(soft_nms_enabled), _soft_nms_method(soft_nms_method), _soft_nms_sigma(soft_nms_sigma),
          _soft_nms_min_score_thres(soft_nms_min_score_thres), _suppress_size(suppress_size), _min_size(min_size), _im_width(im_width), _im_height(im_height)
    {
    }
    /** Get the score threshold */
    float score_thresh() const
    {
        return _score_thresh;
    }
    /** Get the NMS */
    float nms() const
    {
        return _nms;
    }
    /** Get the number of detections */
    int detections_per_im() const
    {
        return _detections_per_im;
    }
    /** Check if soft NMS is enabled */
    bool soft_nms_enabled() const
    {
        return _soft_nms_enabled;
    }
    /** Get soft NMS method */
    NMSType soft_nms_method() const
    {
        return _soft_nms_method;
    }
    /** Get soft NMS sigma */
    float soft_nms_sigma() const
    {
        return _soft_nms_sigma;
    }
    /** Get soft NMS min score threshold */
    float soft_nms_min_score_thres() const
    {
        return _soft_nms_min_score_thres;
    }
    /** Get if NMS will suppress boxes based on their size/position */
    bool suppress_size() const
    {
        return _suppress_size;
    }
    /** Get size suppression threshold */
    float min_size() const
    {
        return _min_size;
    }
    /** Get image width (NMS may suppress boxes whose center sits beyond the image width) */
    float im_width() const
    {
        return _im_width;
    }
    /** Get image height (NMS may suppress boxes whose center sits beyond the image height) */
    float im_height() const
    {
        return _im_height;
    }

private:
    float   _score_thresh;
    float   _nms;
    int     _detections_per_im;
    bool    _soft_nms_enabled;
    NMSType _soft_nms_method;
    float   _soft_nms_sigma;
    float   _soft_nms_min_score_thres;
    bool    _suppress_size;
    float   _min_size;
    float   _im_width;
    float   _im_height;
};
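
/** Usage sketch for @ref BoxNMSLimitInfo (illustrative addition; the thresholds below are arbitrary
 *  example values, not recommended defaults).
 *
 * @code
 * BoxNMSLimitInfo nms_info(0.05f, 0.5f, 300); // score threshold, IoU threshold, detections per image
 * float iou  = nms_info.nms();                // 0.5f
 * bool  soft = nms_info.soft_nms_enabled();   // false (default)
 * @endcode
 */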

/** Padding and stride information class */
class PadStrideInfo
{
public:
    /** Constructor
     *
     * @param[in] stride_x (Optional) Stride, in elements, across x. Defaults to 1.
     * @param[in] stride_y (Optional) Stride, in elements, across y. Defaults to 1.
     * @param[in] pad_x    (Optional) Padding, in elements, across x. Defaults to 0.
     * @param[in] pad_y    (Optional) Padding, in elements, across y. Defaults to 0.
     * @param[in] round    (Optional) Dimensions rounding. Defaults to @ref DimensionRoundingType::FLOOR.
     */
    PadStrideInfo(unsigned int stride_x = 1, unsigned int stride_y = 1,
                  unsigned int pad_x = 0, unsigned int pad_y = 0,
                  DimensionRoundingType round = DimensionRoundingType::FLOOR)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_x),
          _pad_top(pad_y),
          _pad_right(pad_x),
          _pad_bottom(pad_y),
          _round_type(round)
    {
    }
    /** Constructor
     *
     * @param[in] stride_x   Stride, in elements, across x.
     * @param[in] stride_y   Stride, in elements, across y.
     * @param[in] pad_left   Padding across x on the left, in elements.
     * @param[in] pad_right  Padding across x on the right, in elements.
     * @param[in] pad_top    Padding across y on the top, in elements.
     * @param[in] pad_bottom Padding across y on the bottom, in elements.
     * @param[in] round      Dimensions rounding.
     */
    PadStrideInfo(unsigned int stride_x, unsigned int stride_y,
                  unsigned int pad_left, unsigned int pad_right,
                  unsigned int pad_top, unsigned int pad_bottom,
                  DimensionRoundingType round)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_left),
          _pad_top(pad_top),
          _pad_right(pad_right),
          _pad_bottom(pad_bottom),
          _round_type(round)
    {
    }
    /** Get the stride.
     *
     * @return a pair: stride x, stride y.
     */
    std::pair<unsigned int, unsigned int> stride() const
    {
        return _stride;
    }
    /** Check whether the padding is symmetric.
     *
     * @return True if the padding is symmetric.
     */
    bool padding_is_symmetric() const
    {
        return (_pad_left == _pad_right) && (_pad_top == _pad_bottom);
    }
    /** Get the padding.
     *
     * @note This should only be used when the padding is symmetric.
     *
     * @return a pair: padding left/right, padding top/bottom
     */
    std::pair<unsigned int, unsigned int> pad() const
    {
        // This accessor should be used only when padding is symmetric
        ARM_COMPUTE_ERROR_ON(!padding_is_symmetric());
        return std::make_pair(_pad_left, _pad_top);
    }

    /** Get the left padding */
    unsigned int pad_left() const
    {
        return _pad_left;
    }
    /** Get the right padding */
    unsigned int pad_right() const
    {
        return _pad_right;
    }
    /** Get the top padding */
    unsigned int pad_top() const
    {
        return _pad_top;
    }
    /** Get the bottom padding */
    unsigned int pad_bottom() const
    {
        return _pad_bottom;
    }

    /** Get the rounding type */
    DimensionRoundingType round() const
    {
        return _round_type;
    }

    /** Check whether this has any padding */
    bool has_padding() const
    {
        return (_pad_left != 0 || _pad_top != 0 || _pad_right != 0 || _pad_bottom != 0);
    }

private:
    std::pair<unsigned int, unsigned int> _stride;
    unsigned int _pad_left;
    unsigned int _pad_top;
    unsigned int _pad_right;
    unsigned int _pad_bottom;

    DimensionRoundingType _round_type;
};
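
/** Usage sketch for @ref PadStrideInfo (illustrative addition). The first object is the "same"-style
 *  padding a caller might use for a 3x3 kernel at stride 1; all values are examples only.
 *
 * @code
 * PadStrideInfo same_pad(1, 1, 1, 1);                                 // stride 1, one element of padding on every side
 * auto          pad = same_pad.pad();                                 // {1, 1}: valid because the padding is symmetric
 * PadStrideInfo asym(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR); // left 0, right 1, top 0, bottom 1
 * bool          sym = asym.padding_is_symmetric();                    // false: use pad_left()/pad_right() etc. instead of pad()
 * @endcode
 */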

/** Fully connected layer info */
struct FullyConnectedLayerInfo
{
    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */

    /** Sets the weights trained data layout
     *
     * @param[in] layout Data layout that the weights were trained with
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_weights_trained_layout(DataLayout layout)
    {
        weights_trained_layout = layout;
        return *this;
    }
    /** Sets the transpose weights flag
     *
     * @param[in] should_transpose_weights Boolean flag indicating if weights should be transposed
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_transpose_weights(bool should_transpose_weights)
    {
        transpose_weights = should_transpose_weights;
        return *this;
    }
};
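
/** Usage sketch for @ref FullyConnectedLayerInfo (illustrative addition) showing the chained setters.
 *
 * @code
 * FullyConnectedLayerInfo fc_info;
 * fc_info.set_transpose_weights(false)
 *        .set_weights_trained_layout(DataLayout::NHWC);
 * @endcode
 */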

/** PriorBox layer info */
class PriorBoxLayerInfo final
{
public:
    /** Default Constructor */
    PriorBoxLayerInfo()
        : _min_sizes(),
          _variances(),
          _offset(),
          _flip(true),
          _clip(false),
          _max_sizes(),
          _aspect_ratios(),
          _img_size(),
          _steps()
    {
    }
    /** Constructor
     *
     * @param[in] min_sizes     Min sizes vector.
     * @param[in] variances     Variances vector.
     * @param[in] offset        Offset value.
     * @param[in] flip          (Optional) Flip the aspect ratios.
     * @param[in] clip          (Optional) Clip coordinates so that they're within [0,1].
     * @param[in] max_sizes     (Optional) Max sizes vector.
     * @param[in] aspect_ratios (Optional) Aspect ratios of the boxes.
     * @param[in] img_size      (Optional) Image size.
     * @param[in] steps         (Optional) Step values.
     */
    PriorBoxLayerInfo(const std::vector<float> &min_sizes, const std::vector<float> &variances, float offset, bool flip = true, bool clip = false,
                      const std::vector<float> &max_sizes = {}, const std::vector<float> &aspect_ratios = {},
                      const Coordinates2D &img_size = Coordinates2D{ 0, 0 }, const std::array<float, 2> &steps = { { 0.f, 0.f } })
        : _min_sizes(min_sizes),
          _variances(variances),
          _offset(offset),
          _flip(flip),
          _clip(clip),
          _max_sizes(max_sizes),
          _aspect_ratios(),
          _img_size(img_size),
          _steps(steps)
    {
        _aspect_ratios.push_back(1.);
        for(unsigned int i = 0; i < aspect_ratios.size(); ++i)
        {
            float ar            = aspect_ratios[i];
            bool  already_exist = false;
            for(auto ar_new : _aspect_ratios)
            {
                if(std::fabs(ar - ar_new) < 1e-6)
                {
                    already_exist = true;
                    break;
                }
            }
            if(!already_exist)
            {
                _aspect_ratios.push_back(ar);
                if(flip)
                {
                    _aspect_ratios.push_back(1.f / ar);
                }
            }
        }
    }
    /** Get min sizes. */
    std::vector<float> min_sizes() const
    {
        return _min_sizes;
    }
    /** Get variances. */
    std::vector<float> variances() const
    {
        return _variances;
    }
    /** Get the step coordinates */
    std::array<float, 2> steps() const
    {
        return _steps;
    }
    /** Get the image size coordinates */
    Coordinates2D img_size() const
    {
        return _img_size;
    }
    /** Get the offset */
    float offset() const
    {
        return _offset;
    }
    /** Get the flip value */
    bool flip() const
    {
        return _flip;
    }
    /** Get the clip value */
    bool clip() const
    {
        return _clip;
    }
    /** Get max sizes. */
    std::vector<float> max_sizes() const
    {
        return _max_sizes;
    }
    /** Get aspect ratios. */
    std::vector<float> aspect_ratios() const
    {
        return _aspect_ratios;
    }

private:
    std::vector<float> _min_sizes;
    std::vector<float> _variances;
    float              _offset;
    bool               _flip;
    bool               _clip;
    std::vector<float> _max_sizes;
    std::vector<float> _aspect_ratios;
    Coordinates2D      _img_size;
    std::array<float, 2> _steps;
};

/** Pooling Layer Information class */
class PoolingLayerInfo
{
public:
    /** Default Constructor */
    PoolingLayerInfo()
        : _pool_type(PoolingType::MAX), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              unsigned int  pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(Size2D(pool_size, pool_size)), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              Size2D        pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @note This constructor is used for global pooling
     *
     * @param[in] pool_type Pooling type @ref PoolingType.
     */
    explicit PoolingLayerInfo(PoolingType pool_type)
        : _pool_type(pool_type), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
    {
    }
    /** Get the pooling type */
    PoolingType pool_type() const
    {
        return _pool_type;
    }
    /** Get the pooling size */
    const Size2D &pool_size() const
    {
        return _pool_size;
    }
    /** Get the padding and stride */
    PadStrideInfo pad_stride_info() const
    {
        return _pad_stride_info;
    }
    /** Check if padding is excluded in calculations */
    bool exclude_padding() const
    {
        return _exclude_padding;
    }
    /** Check if is global pooling */
    bool is_global_pooling() const
    {
        return _is_global_pooling;
    }

private:
    PoolingType   _pool_type;
    Size2D        _pool_size;
    PadStrideInfo _pad_stride_info;
    bool          _exclude_padding;
    bool          _is_global_pooling;
};
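
/** Usage sketch for @ref PoolingLayerInfo (illustrative addition; sizes and strides are example values).
 *
 * @code
 * PoolingLayerInfo max_pool(PoolingType::MAX, Size2D(2, 2), PadStrideInfo(2, 2, 0, 0)); // 2x2 max pooling, stride 2
 * PoolingLayerInfo global_avg(PoolingType::AVG);                                        // global pooling: size taken from the input at configure time
 * bool is_global = global_avg.is_global_pooling();                                      // true
 * @endcode
 */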

/** ROI Pooling Layer Information class */
class ROIPoolingLayerInfo final
{
public:
    /** Constructor
     *
     * @param[in] pooled_width   Pooled width of the layer.
     * @param[in] pooled_height  Pooled height of the layer.
     * @param[in] spatial_scale  Spatial scale to be applied to the ROI coordinates and dimensions.
     * @param[in] sampling_ratio Number of samples to include in each pooling region (if set to zero, ceil(roi_dims/pooling_dims) samples are used).
     */
    ROIPoolingLayerInfo(unsigned int pooled_width, unsigned int pooled_height, float spatial_scale, unsigned int sampling_ratio = 0)
        : _pooled_width(pooled_width), _pooled_height(pooled_height), _spatial_scale(spatial_scale), _sampling_ratio(sampling_ratio)
    {
    }
    /** Get the pooled width of the layer */
    unsigned int pooled_width() const
    {
        return _pooled_width;
    }
    /** Get the pooled height of the layer */
    unsigned int pooled_height() const
    {
        return _pooled_height;
    }
    /** Get the spatial scale */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get sampling ratio */
    unsigned int sampling_ratio() const
    {
        return _sampling_ratio;
    }

private:
    unsigned int _pooled_width;
    unsigned int _pooled_height;
    float        _spatial_scale;
    unsigned int _sampling_ratio;
};
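
/** Usage sketch for @ref ROIPoolingLayerInfo (illustrative addition; a 7x7 output on a feature map at
 *  1/16th of the input resolution is a hypothetical example configuration).
 *
 * @code
 * ROIPoolingLayerInfo roi_info(7U, 7U, 0.0625f); // pooled width, pooled height, spatial scale
 * float scale = roi_info.spatial_scale();        // 0.0625f
 * @endcode
 */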

/** Generate Proposals Information class */
class GenerateProposalsInfo
{
public:
    /** Constructor
     *
     * @param[in] im_width       Width of the original image
     * @param[in] im_height      Height of the original image
     * @param[in] im_scale       Scale applied to the original image
     * @param[in] spatial_scale  (Optional) Scale applied to the feature map. Defaults to 1.0
     * @param[in] pre_nms_topN   (Optional) Number of the best scores to be selected from the transformations. Defaults to 6000.
     * @param[in] post_nms_topN  (Optional) Number of the best scores to be selected from the NMS operation. Defaults to 300.
     * @param[in] nms_thres      (Optional) NMS overlap threshold. Defaults to 0.7.
     * @param[in] min_size       (Optional) Size used to validate the anchors produced. Defaults to 16.
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region of interest). Defaults to 4.
     */
    GenerateProposalsInfo(float im_width, float im_height, float im_scale, float spatial_scale = 1.0, int pre_nms_topN = 6000, int post_nms_topN = 300, float nms_thres = 0.7, float min_size = 16.0,
                          size_t values_per_roi = 4)
        : _im_height(im_height), _im_width(im_width), _im_scale(im_scale), _spatial_scale(spatial_scale), _pre_nms_topN(pre_nms_topN), _post_nms_topN(post_nms_topN), _nms_thres(nms_thres),
          _min_size(min_size), _values_per_roi(values_per_roi)
    {
    }

    /** Get the original height */
    float im_height() const
    {
        return _im_height;
    }
    /** Get the original width */
    float im_width() const
    {
        return _im_width;
    }
    /** Get the image scale */
    float im_scale() const
    {
        return _im_scale;
    }
    /** Get the value of how many best scores to select (before NMS) */
    int pre_nms_topN() const
    {
        return _pre_nms_topN;
    }
    /** Get the value of how many best scores to select (after NMS) */
    int post_nms_topN() const
    {
        return _post_nms_topN;
    }
    /** Get the NMS overlap threshold */
    float nms_thres() const
    {
        return _nms_thres;
    }
    /** Get the minimal size */
    float min_size() const
    {
        return _min_size;
    }
    /** Get the spatial scale to be applied to the feature maps */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get the values used to represent a ROI (Region of interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _im_height;
    float  _im_width;
    float  _im_scale;
    float  _spatial_scale;
    int    _pre_nms_topN;
    int    _post_nms_topN;
    float  _nms_thres;
    float  _min_size;
    size_t _values_per_roi;
};

/** ComputeAnchors information class */
class ComputeAnchorsInfo
{
public:
    /** Constructor
     *
     * @param[in] feat_width     Feature map width
     * @param[in] feat_height    Feature map height
     * @param[in] spatial_scale  Feature map scale
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region Of Interest). Defaults to 4
     */
    ComputeAnchorsInfo(float feat_width, float feat_height, float spatial_scale, size_t values_per_roi = 4)
        : _feat_height(feat_height),
          _feat_width(feat_width),
          _spatial_scale(spatial_scale),
          _values_per_roi(values_per_roi)
    {
    }

    /** Get the height of the feature map */
    float feat_height() const
    {
        return _feat_height;
    }

    /** Get the width of the feature map */
    float feat_width() const
    {
        return _feat_width;
    }

    /** Get the scale of the feature map */
    float spatial_scale() const
    {
        return _spatial_scale;
    }

    /** Get the values used to represent a ROI (Region Of Interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _feat_height;
    float  _feat_width;
    float  _spatial_scale;
    size_t _values_per_roi;
};

/** Bounding Box Transform information class */
class BoundingBoxTransformInfo final
{
public:
    /** Constructor
     *
     * @param[in] img_width                Width of the original image
     * @param[in] img_height               Height of the original image
     * @param[in] scale                    Scale of the original image
     * @param[in] apply_scale              (Optional) Re-apply scaling after transforming the boxes. Defaults to false
     * @param[in] weights                  (Optional) Weights [wx, wy, ww, wh] for the deltas. Defaults to all ones
     * @param[in] correct_transform_coords (Optional) Correct bounding box transform coordinates. Defaults to false
     * @param[in] bbox_xform_clip          (Optional) Minimum bounding box width and height after bounding box transformation in log-space. Defaults to log(1000/16)
     */
    BoundingBoxTransformInfo(float img_width, float img_height, float scale, bool apply_scale = false, const std::array<float, 4> weights = { { 1.f, 1.f, 1.f, 1.f } },
                             bool correct_transform_coords = false, float bbox_xform_clip = 4.135166556742356f)
        : _img_width(img_width), _img_height(img_height), _scale(scale), _apply_scale(apply_scale), _correct_transform_coords(correct_transform_coords), _weights(weights), _bbox_xform_clip(bbox_xform_clip)
    {
    }

    std::array<float, 4> weights() const
    {
        return _weights;
    }

    float bbox_xform_clip() const
    {
        return _bbox_xform_clip;
    }

    float img_height() const
    {
        return _img_height;
    }

    float img_width() const
    {
        return _img_width;
    }

    float scale() const
    {
        return _scale;
    }

    bool apply_scale() const
    {
        return _apply_scale;
    }

    bool correct_transform_coords() const
    {
        return _correct_transform_coords;
    }

private:
    float _img_width;
    float _img_height;
    float _scale;
    bool  _apply_scale;
    bool  _correct_transform_coords;
    std::array<float, 4> _weights;
    float _bbox_xform_clip;
};
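
/** Usage sketch for @ref BoundingBoxTransformInfo (illustrative addition; the image size and scale are
 *  made-up values).
 *
 * @code
 * BoundingBoxTransformInfo bbox_info(800.f, 600.f, 0.5f); // image width, image height, scale
 * auto w = bbox_info.weights();                           // {1.f, 1.f, 1.f, 1.f} by default
 * @endcode
 */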

/** Activation Layer Information class */
class ActivationLayerInfo
{
public:
    /** Available activation functions */
    enum class ActivationFunction
    {
        LOGISTIC,        /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
        TANH,            /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
        RELU,            /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
        BOUNDED_RELU,    /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
        LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
        LEAKY_RELU,      /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} a \cdot x & \text{if } x < 0 \\ x & \text{if } x \geq 0 \end{cases} \f$ ) */
        SOFT_RELU,       /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
        ABS,             /**< Absolute ( \f$ f(x)= |x| \f$ ) */
        SQUARE,          /**< Square ( \f$ f(x)= x^2 \f$ ) */
        SQRT,            /**< Square root ( \f$ f(x) = \sqrt{x} \f$ ) */
        LINEAR           /**< Linear ( \f$ f(x)= ax + b \f$ ) */
    };

    ActivationLayerInfo() = default;
    /** Constructor
     *
     * @param[in] f The activation function to use.
     * @param[in] a (Optional) The alpha parameter used by some activation functions
     *              (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
     * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
     */
    ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
        : _act(f), _a(a), _b(b), _enabled(true)
    {
    }
    /** Get the type of activation function */
    ActivationFunction activation() const
    {
        return _act;
    }
    /** Get the alpha value */
    float a() const
    {
        return _a;
    }
    /** Get the beta value */
    float b() const
    {
        return _b;
    }
    /** Check if initialised */
    bool enabled() const
    {
        return _enabled;
    }

private:
    ActivationFunction _act = { ActivationLayerInfo::ActivationFunction::LOGISTIC };
    float              _a = {};
    float              _b = {};
    bool               _enabled = { false };
};
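
/** Usage sketch for @ref ActivationLayerInfo (illustrative addition). A bounded rectifier with a = 6
 *  gives a ReLU6-style activation; a default-constructed object reports enabled() == false.
 *
 * @code
 * ActivationLayerInfo relu6(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);
 * ActivationLayerInfo no_act;
 * bool fused = no_act.enabled(); // false: layers can use this to skip the activation
 * @endcode
 */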
1338
1339/** Normalization Layer Information class */
1340class NormalizationLayerInfo
1341{
1342public:
1343 /** Default Constructor
1344 *
1345 * @param[in] type The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NORM_TYPE::CROSS_MAP
1346 * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5.
Georgios Pinitas41caa622017-11-16 14:37:08 +00001347 * @param[in] alpha (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001.
1348 * @param[in] beta (Optional) Beta parameter used by normalization equation. Defaults to 0.5.
1349 * @param[in] kappa (Optional) Kappa parameter used by [Krichevksy 2012] Across Channel Local Brightness Normalization equation.
1350 * @param[in] is_scaled (Optional) Boolean that specifies if alpha will be scaled by the normalization size or not.
1351 * Should be false to follow [Krichevksy 2012].
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001352 */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001353 NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true)
1354 : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001355 {
1356 }
Alex Gildayc357c472018-03-21 13:54:09 +00001357 /** Get the normalization type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001358 NormType type() const
1359 {
1360 return _type;
1361 }
Alex Gildayc357c472018-03-21 13:54:09 +00001362 /** Get the normalization size */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001363 uint32_t norm_size() const
1364 {
1365 return _norm_size;
1366 }
Alex Gildayc357c472018-03-21 13:54:09 +00001367 /** Get the alpha value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001368 float alpha() const
1369 {
1370 return _alpha;
1371 }
Alex Gildayc357c472018-03-21 13:54:09 +00001372 /** Get the beta value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001373 float beta() const
1374 {
1375 return _beta;
1376 }
Alex Gildayc357c472018-03-21 13:54:09 +00001377 /** Get the kappa value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001378 float kappa() const
1379 {
1380 return _kappa;
1381 }
Alex Gildayc357c472018-03-21 13:54:09 +00001382 /** Check if normalization is cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001383 bool is_cross_map() const
1384 {
1385 return _type == NormType::CROSS_MAP;
1386 }
Alex Gildayc357c472018-03-21 13:54:09 +00001387 /** Check if normalization is not cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001388 bool is_in_map() const
1389 {
1390 return !is_cross_map();
1391 }
1392 /** Return the scaling factor of the normalization function.
1393 *
1394 * If is_scaled is set to false then [Krichevksy 2012] normalization scaling is performed,
1395 * where alpha is returned plainly, else alpha is scaled by the total number of elements used for the normalization.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001396 *
1397 * @return The normalization scaling factor.
1398 */
1399 float scale_coeff() const
1400 {
1401 const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001402 return (_is_scaled) ? (_alpha / size) : _alpha;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001403 }
1404
1405private:
1406 NormType _type;
1407 uint32_t _norm_size;
1408 float _alpha;
1409 float _beta;
1410 float _kappa;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001411 bool _is_scaled;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001412};
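// Example: a minimal sketch of building a NormalizationLayerInfo and reading back its scale
// coefficient, assuming NormType::CROSS_MAP is the enumerator referenced above. With the
// default arguments (norm_size = 5, alpha = 0.0001f, is_scaled = true), scale_coeff()
// returns alpha / norm_size.
//
//   NormalizationLayerInfo norm_info(NormType::CROSS_MAP);
//   const float coeff        = norm_info.scale_coeff();  // 0.0001f / 5
//   const bool  is_cross_map = norm_info.is_cross_map(); // true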
1413
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001414/** Convolution Layer Weights Information class. This class stores the necessary information to compute a convolution layer when the weights are already reshaped */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001415class WeightsInfo
1416{
1417public:
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001418 /** Default constructor */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001419 WeightsInfo()
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001420 : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001421 {
1422 }
1423 /** Constructor
1424 *
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001425 * @param[in] are_reshaped True if the weights have been reshaped
1426 * @param[in] kernel_width Kernel width.
1427 * @param[in] kernel_height Kernel height.
1428 * @param[in] num_kernels Number of convolution kernels.
1429 * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001430 */
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001431 WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
1432 : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001433 {
1434 }
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001435 /** Flag which specifies if the weights tensor has been reshaped.
1436 *
1437 * @return True if the weights tensor has been reshaped
1438 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001439 bool are_reshaped() const
1440 {
1441 return _are_reshaped;
1442 };
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001443 /** Return the number of convolution kernels
1444 *
1445 * @return The number of convolution kernels
1446 */
1447 unsigned int num_kernels() const
1448 {
1449 return _num_kernels;
1450 };
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001451 /** Return the width and height of the kernel
1452 *
1453 * @return The width and height of the kernel
1454 */
1455 std::pair<unsigned int, unsigned int> kernel_size() const
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001456 {
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001457 return std::make_pair(_kernel_width, _kernel_height);
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001458 }
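    /** Flag which specifies if the internal reshaped weights have to be retained. Used for reconfiguration purposes.
     *
     * @return True if the internal reshaped weights are retained
     */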
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001459 bool retain_internal_weights() const
1460 {
1461 return _retain_internal_weights;
1462 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001463
1464private:
1465 const bool _are_reshaped;
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001466 const unsigned int _kernel_width;
1467 const unsigned int _kernel_height;
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001468 const unsigned int _num_kernels;
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001469 const bool _retain_internal_weights;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001470};
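// Example: a minimal sketch of describing already-reshaped 3x3 weights for 64 convolution
// kernels; the values are placeholders rather than ones taken from a real network.
//
//   WeightsInfo weights_info(true /* are_reshaped */, 3U, 3U, 64U);
//   const auto kernel_size = weights_info.kernel_size(); // std::pair<unsigned int, unsigned int>{ 3, 3 }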
1471
Gian Marco36a0a462018-01-12 10:21:40 +00001472/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
1473 *
1474 * The matrix A can only be reshaped through @ref CLGEMMInterleave4x4Kernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
1475 * Note: mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block, can optionally be set only for @ref CLGEMMInterleave4x4Kernel
1476 *
1477 * The matrix B can only be reshaped through @ref CLGEMMTranspose1xWKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
1478 * Note: mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block, can optionally be set only for @ref CLGEMMTranspose1xWKernel
1479 *
1480 */
1481class GEMMReshapeInfo final
1482{
1483public:
1484 /** Default constructor */
1485 GEMMReshapeInfo()
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001486 : _m(1), _n(1), _k(1), _mult_transpose1xW_width(1), _mult_interleave4x4_height(1), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false)
Gian Marco36a0a462018-01-12 10:21:40 +00001487 {
1488 }
1489 /** Constructor
1490 *
1491 * @param[in] m Number of matrix A rows
1492 * @param[in] n Number of matrix B columns
1493 * @param[in] k Number of matrix A columns or matrix B rows
1494 * @param[in] mult_transpose1xW_width (Optional) Multiplication factor for the width of the 1xW transposed block
1495 * @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleaved block
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001496 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel.
1497 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001498 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1499 * to perform 1x1 convolutions with the NHWC data layout)
Gian Marco36a0a462018-01-12 10:21:40 +00001500 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001501 GEMMReshapeInfo(int m, int n, int k, int mult_transpose1xW_width = 1, int mult_interleave4x4_height = 1, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001502 : _m(m), _n(n), _k(k), _mult_transpose1xW_width(mult_transpose1xW_width), _mult_interleave4x4_height(mult_interleave4x4_height), _depth_output_gemm3d(depth_output_gemm3d),
1503 _reinterpret_input_as_3d(reinterpret_input_as_3d)
Gian Marco36a0a462018-01-12 10:21:40 +00001504 {
1505 }
1506 /** Number of matrix A rows
1507 *
1508 * @return the number of matrix A rows
1509 */
1510 int m() const
1511 {
1512 return _m;
1513 }
1514 /** Number of matrix B columns
1515 *
1516 * @return the number of matrix B columns
1517 */
1518 int n() const
1519 {
1520 return _n;
1521 }
1522 /** Number of matrix A columns or matrix B rows
1523 *
1524 * @return the number of matrix A columns or matrix B rows
1525 */
1526 int k() const
1527 {
1528 return _k;
1529 }
1530 /** Multiplication factor for the width of the 1xW transposed block
1531 *
1532 * @return the multiplication factor for the width of the 1xW transposed block
1533 */
1534 int mult_transpose1xW_width() const
1535 {
1536 return _mult_transpose1xW_width;
1537 }
1538 /** Multiplication factor for the height of the 4x4 interleaved block
1539 *
1540 * @return the multiplication factor for the height of the 4x4 interleaved block
1541 */
1542 int mult_interleave4x4_height() const
1543 {
1544 return _mult_interleave4x4_height;
1545 }
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001546 /** Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
1547 *
1548 * @note GEMM3D kernel is used when the output has to be reinterpreted as a 3D tensor. In that case:
1549 * m = depth_output_gemm3d * output_height
1550 *
1551 * @return the depth of the output tensor to be used with the GEMM3D kernel
1552 */
1553 int depth_output_gemm3d() const
1554 {
1555 return _depth_output_gemm3d;
1556 }
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001557 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1558 *
1559 * @return True if the input tensor has to be reinterpreted as 3D tensor
1560 */
1561 bool reinterpret_input_as_3d() const
1562 {
1563 return _reinterpret_input_as_3d;
1564 };
Gian Marco36a0a462018-01-12 10:21:40 +00001565
1566private:
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001567 const int _m;
1568 const int _n;
1569 const int _k;
1570 const int _mult_transpose1xW_width;
1571 const int _mult_interleave4x4_height;
1572 const int _depth_output_gemm3d;
1573 const bool _reinterpret_input_as_3d;
Gian Marco36a0a462018-01-12 10:21:40 +00001574};
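// Example: a minimal sketch of a GEMMReshapeInfo for C = A * B with an M x K matrix A and a
// K x N matrix B, leaving the reshape multipliers at their defaults. The dimensions are
// placeholders; when depth_output_gemm3d is non-zero, m is expected to equal
// depth_output_gemm3d * output_height, as noted above.
//
//   const int m = 64, n = 128, k = 32;
//   GEMMReshapeInfo reshape_info(m, n, k); // mult_transpose1xW_width = mult_interleave4x4_height = 1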
1575
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001576/** GEMMLowp output stage type */
1577enum class GEMMLowpOutputStageType
1578{
1579 NONE, /**< No quantization to uint8 */
1580 QUANTIZE_DOWN, /**< Quantize to uint8 using an integer multiplication */
1581 QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
1582 QUANTIZE_DOWN_FLOAT /**< Quantize to uint8 using a floating point multiplication */
1583};
1584
1585/** GEMMLowp output stage info */
1586struct GEMMLowpOutputStageInfo
1587{
1588 GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
1589 int gemmlowp_offset{ 0 }; /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
1590 int gemmlowp_multiplier{ 0 }; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
1591 int gemmlowp_shift{ 0 }; /**< GEMMLowp output stage shift used for quantizing to uint8 */
1592 int gemmlowp_min_bound{ 0 }; /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
1593 int gemmlowp_max_bound{ 0 }; /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
1594};
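// Example: a minimal sketch of filling a GEMMLowpOutputStageInfo for the fixed-point
// quantize-down stage. The offset, multiplier, shift and bounds below are placeholder values,
// not ones derived from a real quantization scheme.
//
//   GEMMLowpOutputStageInfo output_stage{};
//   output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
//   output_stage.gemmlowp_offset     = 2;
//   output_stage.gemmlowp_multiplier = 1073741824; // placeholder fixed-point multiplier
//   output_stage.gemmlowp_shift      = 8;
//   output_stage.gemmlowp_min_bound  = 0;
//   output_stage.gemmlowp_max_bound  = 255;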
1595
Gian Marco36a0a462018-01-12 10:21:40 +00001596/** GEMM information class. This class stores the necessary information to compute GEMM functions
1597 *
1598 * This object also contains the information about how matrix A and matrix B have been reshaped
1599 *
1600 */
Chunosov5124be52017-11-22 20:42:13 +07001601class GEMMInfo
1602{
1603public:
1604 /** Default constructor */
1605 GEMMInfo()
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001606 : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(false), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false), _retain_internal_weights(false),
1607 _gemmlowp_output_stage(), _fp_mixed_precision(false)
Chunosov5124be52017-11-22 20:42:13 +07001608 {
1609 }
1610 /** Constructor
1611 *
1612 * @param[in] is_a_reshaped True if the matrix A has been reshaped
1613 * @param[in] is_b_reshaped True if the matrix B has been reshaped
1614 * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001615 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001616 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001617 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1618 * to perform 1x1 convolutions with the NHWC data layout)
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001619 * @param[in] retain_internal_weights (Optional) Retain the weights tensor from previous run
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001620 * @param[in] gemmlowp_output_stage (Optional) GEMMLowp Output stage info
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001621 * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001622 *
Chunosov5124be52017-11-22 20:42:13 +07001623 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001624 GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001625 GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001626 : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run), _depth_output_gemm3d(depth_output_gemm3d),
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001627 _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights), _gemmlowp_output_stage(gemmlowp_output_stage), _fp_mixed_precision(fp_mixed_precision)
Chunosov5124be52017-11-22 20:42:13 +07001628 {
1629 }
1630 /** Flag which specifies if the matrix A has been reshaped
1631 *
1632 * @return True if the matrix A has been reshaped
1633 */
1634 bool is_a_reshaped() const
1635 {
1636 return _is_a_reshaped;
1637 };
1638 /** Flag which specifies if the matrix B has been reshaped
1639 *
1640 * @return True if the matrix B has been reshaped
1641 */
1642 bool is_b_reshaped() const
1643 {
1644 return _is_b_reshaped;
1645 };
1646 /** Flag which specifies if the reshape of matrix B should be executed only for the first run
1647 *
1648 * @note This flag could be set to TRUE when GEMM is used to accelerate convolution layer
1649 *
1650 * @return True if the reshape of matrix B happens only for the first run
1651 */
1652 bool reshape_b_only_on_first_run() const
1653 {
1654 return _reshape_b_only_on_first_run;
1655 };
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001656 /** Depth of the output when GEMM output is reinterpreted as a 3D tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001657 *
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001658 * @return the depth of the output tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001659 */
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001660 int depth_output_gemm3d() const
Gian Marco36a0a462018-01-12 10:21:40 +00001661 {
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001662 return _depth_output_gemm3d;
1663 };
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001664 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1665 *
1666 * @return True if the input tensor has to be reinterpreted as 3D tensor
1667 */
1668 bool reinterpret_input_as_3d() const
1669 {
1670 return _reinterpret_input_as_3d;
1671 };
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001672 /** Flag which specifies if the weights tensor has to be retained from previous run
1673 *
1674 * @return True if the weights tensor has to be retained
1675 */
1676 bool retain_internal_weights() const
1677 {
1678 return _retain_internal_weights;
1679 };
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001680 /** GEMMLowp output stage
1681 *
1682 * @return the GEMMLowp output stage info
1683 */
1684 GEMMLowpOutputStageInfo gemmlowp_output_stage() const
1685 {
1686 return _gemmlowp_output_stage;
1687 };
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001688 /** Flag which specifies if a wider accumulator should be used.
1689 *
1690 * @return True if a wider accumulator has to be used
1691 */
1692 bool fp_mixed_precision() const
1693 {
1694 return _fp_mixed_precision;
1695 };
Chunosov5124be52017-11-22 20:42:13 +07001696
1697private:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001698 const bool _is_a_reshaped;
1699 const bool _is_b_reshaped;
1700 const bool _reshape_b_only_on_first_run;
1701 const int _depth_output_gemm3d;
1702 const bool _reinterpret_input_as_3d;
1703 const bool _retain_internal_weights;
1704 const GEMMLowpOutputStageInfo _gemmlowp_output_stage;
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001705 const bool _fp_mixed_precision;
Chunosov5124be52017-11-22 20:42:13 +07001706};
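// Example: a minimal sketch of a GEMMInfo where neither operand is pre-reshaped and matrix B
// is reshaped only on the first run, the setup suggested above when GEMM accelerates a
// convolution layer.
//
//   GEMMInfo gemm_info(false /* is_a_reshaped */, false /* is_b_reshaped */, true /* reshape_b_only_on_first_run */);
//   const bool reuse_reshaped_b = gemm_info.reshape_b_only_on_first_run(); // true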
1707
Gian Marco Iodice247f52c2018-03-22 11:24:56 +00001708/** Winograd information */
1709struct WinogradInfo
1710{
1711 /** Constructor
1712 *
1713 * @param[in] output_tile_sz Width and height of the output tile
1714 * @param[in] kernel_sz Width and height of the kernel
1715 * @param[in] input_dims Width and height of the input tensor before the convolution is applied
1716 * @param[in] conv_info Convolution info (Pads, strides)
1717 * @param[in] data_layout Data layout to use for the output tensor once the convolution has been applied
1718 */
1719 WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
1720 : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
1721 {
1722 }
1723
1724 Size2D output_tile_size{}; /**< Width and height of the output tile */
1725 Size2D kernel_size{}; /**< Width and height of the kernel */
1726 Size2D input_dimensions{}; /**< Width and height of the input tensor before the convolution is applied */
1727 PadStrideInfo convolution_info{}; /**< Convolution info (Pads, strides,...) */
1728 DataLayout output_data_layout{ DataLayout::NCHW }; /**< Data layout to use for the output tensor once the convolution has been applied (NCHW or NHWC) */
1729};
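// Example: a minimal sketch of a WinogradInfo for a 3x3 kernel producing 2x2 output tiles on a
// hypothetical 224x224 input, assuming PadStrideInfo(1, 1, 1, 1) denotes unit strides with one
// pixel of padding and that the output keeps the NCHW layout.
//
//   WinogradInfo winograd_info(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(224U, 224U),
//                              PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW);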
1730
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001731/** IO formatting information class*/
1732struct IOFormatInfo
1733{
1734 /** Precision type used when printing floating point numbers */
1735 enum class PrecisionType
1736 {
1737 Default, /**< Default precision to the one that the current stream has */
1738 Custom, /**< Custom precision specified by the user using the precision parameter */
1739 Full /**< The maximum precision of the floating point representation */
1740 };
1741
1742 /** Specifies the area to be printed, used by Tensor objects */
1743 enum class PrintRegion
1744 {
1745 ValidRegion, /**< Prints the valid region of the Tensor object */
1746 NoPadding, /**< Prints the Tensor object without the padding */
1747 Full /**< Print the tensor object including padding */
1748 };
1749
Alex Gildayc357c472018-03-21 13:54:09 +00001750 /** Construct a set of IO formatting information.
1751 *
1752 * @param[in] print_region Area to be printed. Used by Tensor objects. Default: ValidRegion.
1753 * @param[in] precision_type Precision type for floating point numbers. Default: stream default.
1754 * @param[in] precision Precision value for floating point numbers. Default: 10.
1755 * @param[in] align_columns Whether to align columns when printed. Default: true.
1756 * @param[in] element_delim Delimiter between elements. Default: " ".
1757 * @param[in] row_delim Delimiter between rows. Default: "\n".
1758 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001759 IOFormatInfo(PrintRegion print_region = PrintRegion::ValidRegion,
1760 PrecisionType precision_type = PrecisionType::Default,
1761 unsigned int precision = 10,
1762 bool align_columns = true,
1763 std::string element_delim = " ",
1764 std::string row_delim = "\n")
1765 : print_region(print_region),
1766 precision_type(precision_type),
1767 precision(precision),
1768 element_delim(element_delim),
1769 row_delim(row_delim),
1770 align_columns(align_columns)
1771 {
1772 }
1773
Alex Gildayc357c472018-03-21 13:54:09 +00001774 /** Area to be printed by Tensor objects */
1775 PrintRegion print_region;
1776 /** Floating point precision type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001777 PrecisionType precision_type;
Alex Gildayc357c472018-03-21 13:54:09 +00001778 /** Floating point precision */
1779 unsigned int precision;
1780 /** Element delimeter */
1781 std::string element_delim;
1782 /** Row delimeter */
1783 std::string row_delim;
1784 /** Align columns */
1785 bool align_columns;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001786};
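// Example: a minimal sketch of an IOFormatInfo that prints a whole tensor, padding included,
// with maximum floating point precision and comma-separated elements; passing it to a tensor's
// print() method is assumed to be the typical use.
//
//   IOFormatInfo io_fmt(IOFormatInfo::PrintRegion::Full, IOFormatInfo::PrecisionType::Full,
//                       10, true, ", ", "\n");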
Isabella Gottardif07d28d2018-02-06 14:52:43 +00001787
1788/** Available ConvolutionMethod */
1789enum class ConvolutionMethod
1790{
1791 GEMM, /**< Convolution using GEMM */
1792 DIRECT, /**< Direct convolution */
1793 WINOGRAD /**< Convolution using Winograd */
1794};
Georgios Pinitasd8734b52017-12-22 15:27:52 +00001795} // namespace arm_compute
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001796#endif /* __ARM_COMPUTE_TYPES_H__ */