/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TYPES_H__
#define __ARM_COMPUTE_TYPES_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/QAsymm8.h"
#include "arm_compute/core/Rounding.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "support/Half.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

namespace arm_compute
{
/** 16-bit floating point type */
using half = half_float::half;

/** Permutation vector */
using PermutationVector = Strides;
/** Bidirectional strides */
using BiStrides = Coordinates;

/** Image colour formats */
enum class Format
{
    UNKNOWN,  /**< Unknown image format */
    U8,       /**< 1 channel, 1 U8 per channel */
    S16,      /**< 1 channel, 1 S16 per channel */
    U16,      /**< 1 channel, 1 U16 per channel */
    S32,      /**< 1 channel, 1 S32 per channel */
    U32,      /**< 1 channel, 1 U32 per channel */
    F16,      /**< 1 channel, 1 F16 per channel */
    F32,      /**< 1 channel, 1 F32 per channel */
    UV88,     /**< 2 channel, 1 U8 per channel */
    RGB888,   /**< 3 channels, 1 U8 per channel */
    RGBA8888, /**< 4 channels, 1 U8 per channel */
    YUV444,   /**< A 3 plane of 8 bit 4:4:4 sampled Y, U, V planes */
    YUYV422,  /**< A single plane of 32-bit macro pixel of Y0, U0, Y1, V0 bytes */
    NV12,     /**< A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling */
    NV21,     /**< A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling */
    IYUV,     /**< A 3 plane of 8-bit 4:2:0 sampled Y, U, V planes */
    UYVY422   /**< A single plane of 32-bit macro pixel of U0, Y0, V0, Y1 byte */
};

/** Available data types */
enum class DataType
{
    UNKNOWN, /**< Unknown data type */
    U8,      /**< unsigned 8-bit number */
    S8,      /**< signed 8-bit number */
    QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number */
    U16,     /**< unsigned 16-bit number */
    S16,     /**< signed 16-bit number */
    U32,     /**< unsigned 32-bit number */
    S32,     /**< signed 32-bit number */
    U64,     /**< unsigned 64-bit number */
    S64,     /**< signed 64-bit number */
    F16,     /**< 16-bit floating-point number */
    F32,     /**< 32-bit floating-point number */
    F64,     /**< 64-bit floating-point number */
    SIZET    /**< size_t */
};

/** Available Sampling Policies */
enum class SamplingPolicy
{
    CENTER,  /**< Samples are taken at pixel center */
    TOP_LEFT /**< Samples are taken at pixel top left corner */
};

/** Constant value of the border pixels when using BorderMode::CONSTANT */
constexpr uint8_t CONSTANT_BORDER_VALUE = 199;

/** Constant value used to indicate a half-scale pyramid */
constexpr float SCALE_PYRAMID_HALF = 0.5f;

/** Constant value used to indicate an ORB scaled pyramid */
constexpr float SCALE_PYRAMID_ORB = 8.408964152537146130583778358414e-01;

/** [DataLayout enum definition] **/

/** Supported tensor data layouts */
enum class DataLayout
{
    UNKNOWN, /**< Unknown data layout */
    NCHW,    /**< Num samples, channels, height, width */
    NHWC     /**< Num samples, height, width, channels */
};
/** [DataLayout enum definition] **/

/** Supported tensor data layout dimensions */
enum class DataLayoutDimension
{
    CHANNEL, /**< channel */
    HEIGHT,  /**< height */
    WIDTH,   /**< width */
    BATCHES  /**< batches */
};

/** Available ConvolutionMethod */
enum class ConvolutionMethod
{
    GEMM,    /**< Convolution using GEMM */
    DIRECT,  /**< Direct convolution */
    WINOGRAD /**< Convolution using Winograd */
};

/** Supported comparison operations */
enum class ComparisonOperation
{
    Equal,        /**< Equal comparison ( \f$ x == y \f$ ) */
    NotEqual,     /**< NotEqual comparison ( \f$ x != y \f$ ) */
    Greater,      /**< Greater comparison ( \f$ x > y \f$ ) */
    GreaterEqual, /**< Greater equal comparison ( \f$ x >= y \f$ ) */
    Less,         /**< Less comparison ( \f$ x < y \f$ ) */
    LessEqual     /**< Less equal comparison ( \f$ x <= y \f$ ) */
};

/** Quantization settings (used for QASYMM8 data type) */
struct QuantizationInfo
{
    /** Default constructor */
    QuantizationInfo() noexcept
        : scale(0.0f),
          offset(0)
    {
    }

    /** Construct quantization info.
     *
     * @param[in] scale  Scale.
     * @param[in] offset Offset.
     */
    QuantizationInfo(float scale, int offset)
        : scale(scale), offset(offset)
    {
    }

    /** Check whether equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is the same.
     */
    bool operator==(const QuantizationInfo &other) const
    {
        return scale == other.scale && offset == other.offset;
    }

    /** Check whether not equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is not the same.
     */
    bool operator!=(const QuantizationInfo &other) const
    {
        return !(*this == other);
    }

    float scale;  /**< scale */
    int   offset; /**< offset */

    /** Quantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value           Value to quantize.
     * @param[in] rounding_policy Policy to use when rounding.
     *
     * @return the quantized value.
     */
    qasymm8_t quantize(float value, RoundingPolicy rounding_policy) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::quantize: scale == 0");
        return sqcvt_qasymm8_f32(value, scale, offset, rounding_policy);
    }

    /** Dequantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value Value to dequantize.
     *
     * @return the original value before quantization.
     */
    float dequantize(qasymm8_t value) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::dequantize: scale == 0");
        return scvt_f32_qasymm8(value, scale, offset);
    }

    /** Indicates whether this QuantizationInfo has valid settings or not
     *
     * @return True if this has invalid settings (i.e. the scale is zero).
     */
    bool empty() const
    {
        return scale == 0;
    }
};

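// Illustrative usage sketch (not part of the original header): how the scale/offset pair of a
// QuantizationInfo maps a real value to QASYMM8 and back. The values below are hypothetical;
// RoundingPolicy is assumed to come from arm_compute/core/Rounding.h.
//
//     QuantizationInfo qinfo(1.0f / 255.0f, 0);                          // scale, offset
//     qasymm8_t q = qinfo.quantize(0.5f, RoundingPolicy::TO_NEAREST_UP); // ~128
//     float     r = qinfo.dequantize(q);                                 // ~0.502f = 128 * scale
//
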
/** Container for valid region of a window */
struct ValidRegion
{
    /** Default constructor */
    ValidRegion()
        : anchor{}, shape{}
    {
    }

    /** Allow instances of this class to be copy constructed */
    ValidRegion(const ValidRegion &) = default;
    /** Allow instances of this class to be move constructed */
    ValidRegion(ValidRegion &&) = default;
    /** Allow instances of this class to be copied */
    ValidRegion &operator=(const ValidRegion &) = default;
    /** Allow instances of this class to be moved */
    ValidRegion &operator=(ValidRegion &&) = default;
    /** Default destructor */
    ~ValidRegion() = default;

    /** Constructor for a valid region with default number of dimensions
     *
     * @param[in] an_anchor Anchor for the start of the valid region.
     * @param[in] a_shape   Shape of the valid region.
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
    }

    /** Constructor for a valid region with specified number of dimensions
     *
     * @param[in] an_anchor      Anchor for the start of the valid region.
     * @param[in] a_shape        Shape of the valid region.
     * @param[in] num_dimensions Number of dimensions (must be >= number of dimensions of anchor and shape).
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
        anchor.set_num_dimensions(num_dimensions);
    }

    /** Return the start of the valid region for the given dimension @p d */
    int start(unsigned int d) const
    {
        return anchor[d];
    }

    /** Return the end of the valid region for the given dimension @p d */
    int end(unsigned int d) const
    {
        return anchor[d] + shape[d];
    }

    /** Accessor to set the value of anchor and shape for one of the dimensions.
     *
     * @param[in] dimension Dimension for which the value is set.
     * @param[in] start     Value to be set in anchor for the dimension.
     * @param[in] size      Value to be set in shape for the dimension.
     *
     * @return *this.
     */
    ValidRegion &set(size_t dimension, int start, size_t size)
    {
        anchor.set(dimension, start);
        shape.set(dimension, size);
        return *this;
    }

    Coordinates anchor; /**< Anchor for the start of the valid region. */
    TensorShape shape;  /**< Shape of the valid region. */
};

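// Illustrative usage sketch (not part of the original header): a 2D valid region that starts
// at (2, 2) and spans 60x60 elements of a larger plane; the coordinates are hypothetical.
//
//     ValidRegion region(Coordinates(2, 2), TensorShape(60U, 60U));
//     int x_end = region.end(0); // 2 + 60 = 62
//     region.set(1, 0, 64);      // dimension 1 now starts at 0 and spans 64 elements
//
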
/** Methods available to handle borders */
enum class BorderMode
{
    UNDEFINED, /**< Borders are left undefined */
    CONSTANT,  /**< Pixels outside the image are assumed to have a constant value */
    REPLICATE  /**< Pixels outside the image are assumed to have the same value as the closest image pixel */
};

/** Container for 2D border size */
struct BorderSize
{
    /** Empty border, i.e. no border */
    constexpr BorderSize()
        : top{ 0 }, right{ 0 }, bottom{ 0 }, left{ 0 }
    {
    }

    /** Border with equal size around the 2D plane */
    explicit constexpr BorderSize(unsigned int size)
        : top{ size }, right{ size }, bottom{ size }, left{ size }
    {
    }

    /** Border with same size for top/bottom and left/right */
    constexpr BorderSize(unsigned int top_bottom, unsigned int left_right)
        : top{ top_bottom }, right{ left_right }, bottom{ top_bottom }, left{ left_right }
    {
    }

    /** Border with different sizes */
    constexpr BorderSize(unsigned int top, unsigned int right, unsigned int bottom, unsigned int left)
        : top{ top }, right{ right }, bottom{ bottom }, left{ left }
    {
    }

    /** Check if the entire border is zero */
    constexpr bool empty() const
    {
        return top == 0 && right == 0 && bottom == 0 && left == 0;
    }

    /** Check if the border is the same size on all sides */
    constexpr bool uniform() const
    {
        return top == right && top == bottom && top == left;
    }

    /** Scale this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return *this.
     */
    BorderSize &operator*=(float scale)
    {
        top *= scale;
        right *= scale;
        bottom *= scale;
        left *= scale;

        return *this;
    }

    /** Scale a copy of this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return a scaled copy of this.
     */
    BorderSize operator*(float scale)
    {
        BorderSize size = *this;
        size *= scale;

        return size;
    }

    /** Limit this border size.
     *
     * @param[in] limit Border size to limit this border size to.
     */
    void limit(const BorderSize &limit)
    {
        top    = std::min(top, limit.top);
        right  = std::min(right, limit.right);
        bottom = std::min(bottom, limit.bottom);
        left   = std::min(left, limit.left);
    }

    unsigned int top;    /**< top of the border */
    unsigned int right;  /**< right of the border */
    unsigned int bottom; /**< bottom of the border */
    unsigned int left;   /**< left of the border */
};

/** Container for 2D padding size */
using PaddingSize = BorderSize;

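// Illustrative usage sketch (not part of the original header): the constructors map onto
// uniform, top/bottom-left/right and fully asymmetric borders; the sizes are hypothetical.
//
//     BorderSize uniform(3);              // 3 elements on every side
//     BorderSize tblr(1, 2);              // top/bottom = 1, left/right = 2
//     tblr.limit(BorderSize(1, 1, 1, 1)); // clamp every side to at most 1
//     bool same = uniform.uniform();      // true
//
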
/** Policy to handle overflow */
enum class ConvertPolicy
{
    WRAP,    /**< Wrap around */
    SATURATE /**< Saturate */
};

/** Interpolation method */
enum class InterpolationPolicy
{
    NEAREST_NEIGHBOR, /**< Output values are defined to match the source pixel whose center is nearest to the sample position */
    BILINEAR,         /**< Output values are defined by bilinear interpolation between the pixels */
    AREA,             /**< Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image */
};

/** Bilinear Interpolation method used by LKTracker */
enum class BilinearInterpolation
{
    BILINEAR_OLD_NEW, /**< Old-new method */
    BILINEAR_SCHARR   /**< Scharr method */
};

/** Threshold mode */
enum class ThresholdType
{
    BINARY, /**< Threshold with one value */
    RANGE   /**< Threshold with two values */
};

/** Termination criteria */
enum class Termination
{
    TERM_CRITERIA_EPSILON,    /**< Terminate when within epsilon of a threshold */
    TERM_CRITERIA_ITERATIONS, /**< Terminate after a maximum number of iterations */
    TERM_CRITERIA_BOTH        /**< Terminate on whichever of the other conditions occurs first */
};

/** Magnitude calculation type. */
enum class MagnitudeType
{
    L1NORM, /**< L1 normalization type */
    L2NORM  /**< L2 normalization type */
};

/** Phase calculation type.
 *
 * @note When PhaseType == SIGNED, each angle is mapped to the range 0 to 255 inclusive otherwise angles between 0 and 180
 */
enum class PhaseType
{
    SIGNED,  /**< Angle range: [0, 360] */
    UNSIGNED /**< Angle range: [0, 180] */
};

/** Keypoint type */
struct KeyPoint
{
    int32_t x{ 0 };               /**< X coordinates */
    int32_t y{ 0 };               /**< Y coordinates */
    float   strength{ 0.f };      /**< Strength of the point */
    float   scale{ 0.f };         /**< Scale initialized to 0 by the corner detector */
    float   orientation{ 0.f };   /**< Orientation initialized to 0 by the corner detector */
    int32_t tracking_status{ 0 }; /**< Status initialized to 1 by the corner detector, set to 0 when the point is lost */
    float   error{ 0.f };         /**< Tracking error initialized to 0 by the corner detector */
};

/** Internal key point */
using InternalKeypoint = std::tuple<float, float, float>; /* x,y,strength */

/** Rectangle type */
struct Rectangle
{
    uint16_t x;      /**< Top-left x coordinate */
    uint16_t y;      /**< Top-left y coordinate */
    uint16_t width;  /**< Width of the rectangle */
    uint16_t height; /**< Height of the rectangle */
};

/** Coordinate type */
struct Coordinates2D
{
    int32_t x; /**< X coordinates */
    int32_t y; /**< Y coordinates */
};

/** Coordinate type */
struct Coordinates3D
{
    uint32_t x; /**< X coordinates */
    uint32_t y; /**< Y coordinates */
    uint32_t z; /**< Z coordinates */
};

/** Padding information as a pair of unsigned int start/end */
using PaddingInfo = std::pair<uint32_t, uint32_t>;

/** List of padding information */
using PaddingList = std::vector<PaddingInfo>;

/** Information to produce a tiled version of a Tensor */
using Multiples = std::vector<uint32_t>;

/** Region of interest */
struct ROI
{
    Rectangle rect;      /**< Rectangle specifying the region of interest */
    uint16_t  batch_idx; /**< The batch index of the region of interest */
};

/** Available channels */
enum class Channel
{
    UNKNOWN, /**< Unknown channel format */
    C0,      /**< First channel (used by formats with unknown channel types). */
    C1,      /**< Second channel (used by formats with unknown channel types). */
    C2,      /**< Third channel (used by formats with unknown channel types). */
    C3,      /**< Fourth channel (used by formats with unknown channel types). */
    R,       /**< Red channel. */
    G,       /**< Green channel. */
    B,       /**< Blue channel. */
    A,       /**< Alpha channel. */
    Y,       /**< Luma channel. */
    U,       /**< Cb/U channel. */
    V        /**< Cr/V/Value channel. */
};

/** Available matrix patterns */
enum class MatrixPattern
{
    BOX,   /**< Box pattern matrix. */
    CROSS, /**< Cross pattern matrix. */
    DISK,  /**< Disk pattern matrix. */
    OTHER  /**< Any other matrix pattern. */
};

/** Available non linear functions. */
enum class NonLinearFilterFunction : unsigned
{
    MEDIAN = 0, /**< Non linear median filter. */
    MIN    = 1, /**< Non linear erode. */
    MAX    = 2, /**< Non linear dilate. */
};

/** Available reduction operations */
enum class ReductionOperation
{
    SUM_SQUARE,  /**< Sum of squares */
    SUM,         /**< Sum */
    MEAN_SUM,    /**< Mean of sum */
    ARG_IDX_MAX, /**< Index of the max value */
    ARG_IDX_MIN  /**< Index of the min value */
};

/** Available element-wise operations */
enum class ArithmeticOperation
{
    ADD,          /**< (x + y) */
    SUB,          /**< (x - y) */
    DIV,          /**< (x / y) */
    MIN,          /**< Min(x, y) */
    MAX,          /**< Max(x, y) */
    SQUARED_DIFF, /**< (x - y)^2 */
};

/** Available element wise unary operations */
enum class ElementWiseUnary
{
    RSQRT, /**< Reverse square root */
    EXP,   /**< Exponential */
};

/** The normalization type used for the normalization layer */
enum class NormType
{
    IN_MAP_1D, /**< Normalization applied within the same map in 1D region */
    IN_MAP_2D, /**< Normalization applied within the same map in 2D region */
    CROSS_MAP  /**< Normalization applied cross maps */
};

/** Normalization type for Histogram of Oriented Gradients (HOG) */
enum class HOGNormType
{
    L2_NORM    = 1, /**< L2-norm */
    L2HYS_NORM = 2, /**< L2-norm followed by clipping */
    L1_NORM    = 3  /**< L1 norm */
};

/** Detection window used for the object detection. The detection window keeps the following information:
 *
 * -# Geometry of the rectangular window (x/y of top-left corner and width/height)
 * -# Index of the class used for evaluating which class the detection window belongs to
 * -# Confidence value (score) obtained with the classifier
 */
struct DetectionWindow
{
    uint16_t x{ 0 };         /**< Top-left x coordinate */
    uint16_t y{ 0 };         /**< Top-left y coordinate */
    uint16_t width{ 0 };     /**< Width of the detection window */
    uint16_t height{ 0 };    /**< Height of the detection window */
    uint16_t idx_class{ 0 }; /**< Index of the class */
    float    score{ 0.f };   /**< Confidence value for the detection window */
};

/** Dimension rounding type when down-scaling on CNNs
 * @note Used in pooling and convolution layer
 */
enum class DimensionRoundingType
{
    FLOOR, /**< Floor rounding */
    CEIL   /**< Ceil rounding */
};

/** Available pooling types */
enum class PoolingType
{
    MAX, /**< Max Pooling */
    AVG, /**< Average Pooling */
    L2   /**< L2 Pooling */
};

/** Available non maxima suppression types */
enum class NMSType
{
    LINEAR,   /**< Linear NMS */
    GAUSSIAN, /**< Gaussian NMS */
    ORIGINAL  /**< Original NMS */
};

/** BoxWithNonMaximaSuppressionLimit Information class */
class BoxNMSLimitInfo final
{
public:
    /** Constructor
     *
     * @param[in] score_thresh             (Optional) Score threshold.
     * @param[in] nms                      (Optional) NMS value
     * @param[in] detections               (Optional) Number of detections
     * @param[in] soft_nms_enabled         (Optional) Enable SoftNMS
     * @param[in] soft_nms_method          (Optional) Soft NMS method
     * @param[in] soft_nms_sigma           (Optional) Soft NMS sigma value
     * @param[in] soft_nms_min_score_thres (Optional) Soft NMS minimum score threshold
     * @param[in] suppress_size            (Optional) Filter out boxes based on their size. Defaults to false
     * @param[in] min_size                 (Optional) Boxes smaller than min_size will be filtered out. Defaults to 1
     * @param[in] im_width                 (Optional) Boxes whose centers (on the x axis) are beyond im_width will be filtered. Defaults to 1
     * @param[in] im_height                (Optional) Boxes whose centers (on the y axis) are beyond im_height will be filtered. Defaults to 1
     */
    BoxNMSLimitInfo(float score_thresh = 0.05f, float nms = 0.3f,
                    int detections = 100, bool soft_nms_enabled = false,
                    NMSType soft_nms_method = NMSType::LINEAR,
                    float soft_nms_sigma = 0.5f, float soft_nms_min_score_thres = 0.001f, bool suppress_size = false, float min_size = 1.0f, float im_width = 1.0f, float im_height = 1.0f)
        : _score_thresh(score_thresh), _nms(nms), _detections_per_im(detections), _soft_nms_enabled(soft_nms_enabled), _soft_nms_method(soft_nms_method), _soft_nms_sigma(soft_nms_sigma),
          _soft_nms_min_score_thres(soft_nms_min_score_thres), _suppress_size(suppress_size), _min_size(min_size), _im_width(im_width), _im_height(im_height)
    {
    }
    /** Get the score threshold */
    float score_thresh() const
    {
        return _score_thresh;
    }
    /** Get the NMS */
    float nms() const
    {
        return _nms;
    }
    /** Get the number of detections */
    int detections_per_im() const
    {
        return _detections_per_im;
    }
    /** Check if soft NMS is enabled */
    bool soft_nms_enabled() const
    {
        return _soft_nms_enabled;
    }
    /** Get soft NMS method */
    NMSType soft_nms_method() const
    {
        return _soft_nms_method;
    }
    /** Get soft NMS sigma */
    float soft_nms_sigma() const
    {
        return _soft_nms_sigma;
    }
    /** Get soft nms min score threshold */
    float soft_nms_min_score_thres() const
    {
        return _soft_nms_min_score_thres;
    }
    /** Get if NMS will suppress boxes based on their size/position */
    bool suppress_size() const
    {
        return _suppress_size;
    }
    /** Get size suppression threshold */
    float min_size() const
    {
        return _min_size;
    }
    /** Get image width (NMS may suppress boxes whose center sits beyond the image width) */
    float im_width() const
    {
        return _im_width;
    }
    /** Get image height (NMS may suppress boxes whose center sits beyond the image height) */
    float im_height() const
    {
        return _im_height;
    }

private:
    float   _score_thresh;
    float   _nms;
    int     _detections_per_im;
    bool    _soft_nms_enabled;
    NMSType _soft_nms_method;
    float   _soft_nms_sigma;
    float   _soft_nms_min_score_thres;
    bool    _suppress_size;
    float   _min_size;
    float   _im_width;
    float   _im_height;
};

/** Padding and stride information class */
class PadStrideInfo
{
public:
    /** Constructor
     *
     * @param[in] stride_x (Optional) Stride, in elements, across x. Defaults to 1.
     * @param[in] stride_y (Optional) Stride, in elements, across y. Defaults to 1.
     * @param[in] pad_x    (Optional) Padding, in elements, across x. Defaults to 0.
     * @param[in] pad_y    (Optional) Padding, in elements, across y. Defaults to 0.
     * @param[in] round    (Optional) Dimensions rounding. Defaults to @ref FLOOR.
     */
    PadStrideInfo(unsigned int stride_x = 1, unsigned int stride_y = 1,
                  unsigned int pad_x = 0, unsigned int pad_y = 0,
                  DimensionRoundingType round = DimensionRoundingType::FLOOR)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_x),
          _pad_top(pad_y),
          _pad_right(pad_x),
          _pad_bottom(pad_y),
          _round_type(round)
    {
    }
    /** Constructor
     *
     * @param[in] stride_x   Stride, in elements, across x.
     * @param[in] stride_y   Stride, in elements, across y.
     * @param[in] pad_left   Padding across x on the left, in elements.
     * @param[in] pad_right  Padding across x on the right, in elements.
     * @param[in] pad_top    Padding across y on the top, in elements.
     * @param[in] pad_bottom Padding across y on the bottom, in elements.
     * @param[in] round      Dimensions rounding.
     */
    PadStrideInfo(unsigned int stride_x, unsigned int stride_y,
                  unsigned int pad_left, unsigned int pad_right,
                  unsigned int pad_top, unsigned int pad_bottom,
                  DimensionRoundingType round)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_left),
          _pad_top(pad_top),
          _pad_right(pad_right),
          _pad_bottom(pad_bottom),
          _round_type(round)
    {
    }
    /** Get the stride.
     *
     * @return a pair: stride x, stride y.
     */
    std::pair<unsigned int, unsigned int> stride() const
    {
        return _stride;
    }
    /** Check whether the padding is symmetric.
     *
     * @return True if the padding is symmetric.
     */
    bool padding_is_symmetric() const
    {
        return (_pad_left == _pad_right) && (_pad_top == _pad_bottom);
    }
    /** Get the padding.
     *
     * @note This should only be used when the padding is symmetric.
     *
     * @return a pair: padding left/right, padding top/bottom
     */
    std::pair<unsigned int, unsigned int> pad() const
    {
        // This accessor should be used only when padding is symmetric
        ARM_COMPUTE_ERROR_ON(!padding_is_symmetric());
        return std::make_pair(_pad_left, _pad_top);
    }

    /** Get the left padding */
    unsigned int pad_left() const
    {
        return _pad_left;
    }
    /** Get the right padding */
    unsigned int pad_right() const
    {
        return _pad_right;
    }
    /** Get the top padding */
    unsigned int pad_top() const
    {
        return _pad_top;
    }
    /** Get the bottom padding */
    unsigned int pad_bottom() const
    {
        return _pad_bottom;
    }

    /** Get the rounding type */
    DimensionRoundingType round() const
    {
        return _round_type;
    }

    /** Check whether this has any padding */
    bool has_padding() const
    {
        return (_pad_left != 0 || _pad_top != 0 || _pad_right != 0 || _pad_bottom != 0);
    }

private:
    std::pair<unsigned int, unsigned int> _stride;
    unsigned int _pad_left;
    unsigned int _pad_top;
    unsigned int _pad_right;
    unsigned int _pad_bottom;

    DimensionRoundingType _round_type;
};

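// Illustrative usage sketch (not part of the original header): "same" padding for a 3x3
// kernel at stride 1 is symmetric, so pad() is safe to call; the values are hypothetical.
//
//     PadStrideInfo same_pad(1, 1, 1, 1);      // stride 1x1, pad 1 on x and y
//     auto pad = same_pad.pad();               // { 1, 1 }
//     PadStrideInfo asym(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR);
//     bool sym = asym.padding_is_symmetric();  // false: use pad_left()/pad_right() etc. instead
//
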
/** Fully connected layer info */
struct FullyConnectedLayerInfo
{
    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */

    /** Sets the weights trained data layout
     *
     * @param[in] layout Data layout that the weights were trained with
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_weights_trained_layout(DataLayout layout)
    {
        weights_trained_layout = layout;
        return *this;
    }
    /** Sets the transpose weights flag
     *
     * @param[in] should_transpose_weights Boolean flag indicating if weights should be transposed
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_transpose_weights(bool should_transpose_weights)
    {
        transpose_weights = should_transpose_weights;
        return *this;
    }
};

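// Illustrative usage sketch (not part of the original header): the setters return *this so
// they can be chained while building the info object; the chosen values are hypothetical.
//
//     FullyConnectedLayerInfo fc_info;
//     fc_info.set_weights_trained_layout(DataLayout::NHWC).set_transpose_weights(false);
//
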
/** PriorBox layer info */
class PriorBoxLayerInfo final
{
public:
    /** Default Constructor */
    PriorBoxLayerInfo()
        : _min_sizes(),
          _variances(),
          _offset(),
          _flip(true),
          _clip(false),
          _max_sizes(),
          _aspect_ratios(),
          _img_size(),
          _steps()
    {
    }
    /** Constructor
     *
     * @param[in] min_sizes     Min sizes vector.
     * @param[in] variances     Variances vector.
     * @param[in] offset        Offset value.
     * @param[in] flip          (Optional) Flip the aspect ratios.
     * @param[in] clip          (Optional) Clip coordinates so that they're within [0,1].
     * @param[in] max_sizes     (Optional) Max sizes vector.
     * @param[in] aspect_ratios (Optional) Aspect ratios of the boxes.
     * @param[in] img_size      (Optional) Image size.
     * @param[in] steps         (Optional) Step values.
     */
    PriorBoxLayerInfo(const std::vector<float> &min_sizes, const std::vector<float> &variances, float offset, bool flip = true, bool clip = false,
                      const std::vector<float> &max_sizes = {}, const std::vector<float> &aspect_ratios = {},
                      const Coordinates2D &img_size = Coordinates2D{ 0, 0 }, const std::array<float, 2> &steps = { { 0.f, 0.f } })
        : _min_sizes(min_sizes),
          _variances(variances),
          _offset(offset),
          _flip(flip),
          _clip(clip),
          _max_sizes(max_sizes),
          _aspect_ratios(),
          _img_size(img_size),
          _steps(steps)
    {
        _aspect_ratios.push_back(1.);
        for(unsigned int i = 0; i < aspect_ratios.size(); ++i)
        {
            float ar            = aspect_ratios[i];
            bool  already_exist = false;
            for(auto ar_new : _aspect_ratios)
            {
                if(fabs(ar - ar_new) < 1e-6)
                {
                    already_exist = true;
                    break;
                }
            }
            if(!already_exist)
            {
                _aspect_ratios.push_back(ar);
                if(flip)
                {
                    _aspect_ratios.push_back(1.f / ar);
                }
            }
        }
    }
    /** Get min sizes. */
    std::vector<float> min_sizes() const
    {
        return _min_sizes;
    }
    /** Get min variances. */
    std::vector<float> variances() const
    {
        return _variances;
    }
    /** Get the step coordinates */
    std::array<float, 2> steps() const
    {
        return _steps;
    }
    /** Get the image size coordinates */
    Coordinates2D img_size() const
    {
        return _img_size;
    }
    /** Get the offset */
    float offset() const
    {
        return _offset;
    }
    /** Get the flip value */
    bool flip() const
    {
        return _flip;
    }
    /** Get the clip value */
    bool clip() const
    {
        return _clip;
    }
    /** Get max sizes. */
    std::vector<float> max_sizes() const
    {
        return _max_sizes;
    }
    /** Get aspect ratios. */
    std::vector<float> aspect_ratios() const
    {
        return _aspect_ratios;
    }

private:
    std::vector<float>   _min_sizes;
    std::vector<float>   _variances;
    float                _offset;
    bool                 _flip;
    bool                 _clip;
    std::vector<float>   _max_sizes;
    std::vector<float>   _aspect_ratios;
    Coordinates2D        _img_size;
    std::array<float, 2> _steps;
};

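// Illustrative note (not part of the original header): the constructor above de-duplicates the
// aspect ratios and, when flip is true, also stores their reciprocals, always starting from 1.
// For instance (hypothetical values):
//
//     PriorBoxLayerInfo info({ 30.f }, { 0.1f, 0.1f, 0.2f, 0.2f }, 0.5f, true, false, {}, { 2.f });
//     // info.aspect_ratios() == { 1.f, 2.f, 0.5f }
//
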
/** Pooling Layer Information class */
class PoolingLayerInfo
{
public:
    /** Default Constructor */
    PoolingLayerInfo()
        : _pool_type(PoolingType::MAX), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              unsigned int  pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(Size2D(pool_size, pool_size)), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              Size2D        pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @note This constructor is used for global pooling
     *
     * @param[in] pool_type Pooling type @ref PoolingType.
     */
    explicit PoolingLayerInfo(PoolingType pool_type)
        : _pool_type(pool_type), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
    {
    }
    /** Get the pooling type */
    PoolingType pool_type() const
    {
        return _pool_type;
    }
    /** Get the pooling size */
    const Size2D &pool_size() const
    {
        return _pool_size;
    }
    /** Get the padding and stride */
    PadStrideInfo pad_stride_info() const
    {
        return _pad_stride_info;
    }
    /** Check if padding is excluded in calculations */
    bool exclude_padding() const
    {
        return _exclude_padding;
    }
    /** Check if is global pooling */
    bool is_global_pooling() const
    {
        return _is_global_pooling;
    }

private:
    PoolingType   _pool_type;
    Size2D        _pool_size;
    PadStrideInfo _pad_stride_info;
    bool          _exclude_padding;
    bool          _is_global_pooling;
};

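// Illustrative usage sketch (not part of the original header): a 2x2 max pooling at stride 2
// versus global average pooling; the configuration values are hypothetical.
//
//     PoolingLayerInfo max_pool(PoolingType::MAX, 2U, PadStrideInfo(2, 2, 0, 0));
//     PoolingLayerInfo global_avg(PoolingType::AVG);   // pool size is derived from the input
//     bool is_global = global_avg.is_global_pooling(); // true
//
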
/** ROI Pooling Layer Information class */
class ROIPoolingLayerInfo final
{
public:
    /** Constructor
     *
     * @param[in] pooled_width   Pooled width of the layer.
     * @param[in] pooled_height  Pooled height of the layer.
     * @param[in] spatial_scale  Spatial scale to be applied to the ROI coordinates and dimensions.
     * @param[in] sampling_ratio Number of samples to include in each pooling region. If set to zero, ceil(roi_dims/pooling_dims) samples are used.
     */
    ROIPoolingLayerInfo(unsigned int pooled_width, unsigned int pooled_height, float spatial_scale, unsigned int sampling_ratio = 0)
        : _pooled_width(pooled_width), _pooled_height(pooled_height), _spatial_scale(spatial_scale), _sampling_ratio(sampling_ratio)
    {
    }
    /** Get the pooled width of the layer */
    unsigned int pooled_width() const
    {
        return _pooled_width;
    }
    /** Get the pooled height of the layer */
    unsigned int pooled_height() const
    {
        return _pooled_height;
    }
    /** Get the spatial scale */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get sampling ratio */
    unsigned int sampling_ratio() const
    {
        return _sampling_ratio;
    }

private:
    unsigned int _pooled_width;
    unsigned int _pooled_height;
    float        _spatial_scale;
    unsigned int _sampling_ratio;
};

/** Generate Proposals Information class */
class GenerateProposalsInfo
{
public:
    /** Constructor
     *
     * @param[in] im_width       Width of the original image
     * @param[in] im_height      Height of the original image
     * @param[in] im_scale       Scale applied to the original image
     * @param[in] spatial_scale  (Optional) Scale applied to the feature map. Defaults to 1.0
     * @param[in] pre_nms_topN   (Optional) Number of the best scores to be selected from the transformations. Defaults to 6000.
     * @param[in] post_nms_topN  (Optional) Number of the best scores to be selected from the NMS operation. Defaults to 300.
     * @param[in] nms_thres      (Optional) NMS overlap threshold. Defaults to 0.7.
     * @param[in] min_size       (Optional) Size used to validate the anchors produced. Defaults to 16.
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region of interest). Defaults to 4.
     */
    GenerateProposalsInfo(float im_width, float im_height, float im_scale, float spatial_scale = 1.0, int pre_nms_topN = 6000, int post_nms_topN = 300, float nms_thres = 0.7, float min_size = 16.0,
                          size_t values_per_roi = 4)
        : _im_height(im_height), _im_width(im_width), _im_scale(im_scale), _spatial_scale(spatial_scale), _pre_nms_topN(pre_nms_topN), _post_nms_topN(post_nms_topN), _nms_thres(nms_thres),
          _min_size(min_size), _values_per_roi(values_per_roi)
    {
    }

    /* Get the original height */
    float im_height() const
    {
        return _im_height;
    }
    /* Get the original width */
    float im_width() const
    {
        return _im_width;
    }
    /* Get the image scale */
    float im_scale() const
    {
        return _im_scale;
    }
    /* Get the value of how many best scores to select (before NMS) */
    int pre_nms_topN() const
    {
        return _pre_nms_topN;
    }
    /* Get the value of how many best scores to select (after NMS) */
    int post_nms_topN() const
    {
        return _post_nms_topN;
    }
    /* Get the NMS overlap threshold */
    float nms_thres() const
    {
        return _nms_thres;
    }
    /* Get the minimal size */
    float min_size() const
    {
        return _min_size;
    }
    /* Get the spatial scale to be applied to the feature maps */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /* Get the values used to represent a ROI (Region of interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _im_height;
    float  _im_width;
    float  _im_scale;
    float  _spatial_scale;
    int    _pre_nms_topN;
    int    _post_nms_topN;
    float  _nms_thres;
    float  _min_size;
    size_t _values_per_roi;
};

/** ComputeAnchors information class */
class ComputeAnchorsInfo
{
public:
    /** Constructor
     *
     * @param[in] feat_width     Feature map width
     * @param[in] feat_height    Feature map height
     * @param[in] spatial_scale  Feature map scale
     * @param[in] values_per_roi (Optional) Values used to represent a ROI (Region Of Interest). Defaults to 4
     */
    ComputeAnchorsInfo(float feat_width, float feat_height, float spatial_scale, size_t values_per_roi = 4)
        : _feat_height(feat_height),
          _feat_width(feat_width),
          _spatial_scale(spatial_scale),
          _values_per_roi(values_per_roi)
    {
    }

    /* Get the height of the feature map */
    float feat_height() const
    {
        return _feat_height;
    }

    /* Get the width of the feature map */
    float feat_width() const
    {
        return _feat_width;
    }

    /* Get the scale of the feature map */
    float spatial_scale() const
    {
        return _spatial_scale;
    }

    /* Get the values used to represent a ROI (Region Of Interest) */
    size_t values_per_roi() const
    {
        return _values_per_roi;
    }

private:
    float  _feat_height;
    float  _feat_width;
    float  _spatial_scale;
    size_t _values_per_roi;
};

/** Bounding Box Transform information class */
class BoundingBoxTransformInfo final
{
public:
    /** Constructor
     *
     * @param[in] img_width                Width of the original image
     * @param[in] img_height               Height of the original image
     * @param[in] scale                    Scale of the original image
     * @param[in] apply_scale              (Optional) Re-apply scaling after transforming the boxes. Defaults to false
     * @param[in] weights                  (Optional) Weights [wx, wy, ww, wh] for the deltas. Defaults to all ones
     * @param[in] correct_transform_coords (Optional) Correct bounding box transform coordinates. Defaults to false
     * @param[in] bbox_xform_clip          (Optional) Minimum bounding box width and height after bounding box transformation in log-space. Defaults to log(1000/16)
     */
    BoundingBoxTransformInfo(float img_width, float img_height, float scale, bool apply_scale = false, const std::array<float, 4> weights = { { 1.f, 1.f, 1.f, 1.f } },
                             bool correct_transform_coords = false, float bbox_xform_clip = 4.135166556742356f)
        : _img_width(img_width), _img_height(img_height), _scale(scale), _apply_scale(apply_scale), _correct_transform_coords(correct_transform_coords), _weights(weights), _bbox_xform_clip(bbox_xform_clip)
    {
    }

    std::array<float, 4> weights() const
    {
        return _weights;
    }

    float bbox_xform_clip() const
    {
        return _bbox_xform_clip;
    }

    float img_height() const
    {
        return _img_height;
    }

    float img_width() const
    {
        return _img_width;
    }

    float scale() const
    {
        return _scale;
    }

    bool apply_scale() const
    {
        return _apply_scale;
    }

    bool correct_transform_coords() const
    {
        return _correct_transform_coords;
    }

private:
    float _img_width;
    float _img_height;
    float _scale;
    bool  _apply_scale;
    bool  _correct_transform_coords;
    std::array<float, 4> _weights;
    float _bbox_xform_clip;
};

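// Illustrative note (not part of the original header): the default bbox_xform_clip of
// 4.135166556742356f is log(1000/16), as the constructor documentation states. A quick
// check of the arithmetic:
//
//     #include <cmath>
//     float clip = std::log(1000.0f / 16.0f); // ~4.135167f
//
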
/** Activation Layer Information class */
class ActivationLayerInfo
{
public:
    /** Available activation functions */
    enum class ActivationFunction
    {
        LOGISTIC,        /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
        TANH,            /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
        RELU,            /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
        BOUNDED_RELU,    /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
        LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
        LEAKY_RELU,      /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} a \cdot x & \text{if } x < 0 \\ x & \text{if } x \geq 0 \end{cases} \f$ ) */
        SOFT_RELU,       /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
        ABS,             /**< Absolute ( \f$ f(x)= |x| \f$ ) */
        SQUARE,          /**< Square ( \f$ f(x)= x^2 \f$ ) */
        SQRT,            /**< Square root ( \f$ f(x) = \sqrt{x} \f$ ) */
        LINEAR           /**< Linear ( \f$ f(x)= ax + b \f$ ) */
    };

    ActivationLayerInfo() = default;
    /** Constructor
     *
     * @param[in] f The activation function to use.
     * @param[in] a (Optional) The alpha parameter used by some activation functions
     *              (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
     * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
     */
    ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
        : _act(f), _a(a), _b(b), _enabled(true)
    {
    }
    /** Get the type of activation function */
    ActivationFunction activation() const
    {
        return _act;
    }
    /** Get the alpha value */
    float a() const
    {
        return _a;
    }
    /** Get the beta value */
    float b() const
    {
        return _b;
    }
    /** Check if initialised */
    bool enabled() const
    {
        return _enabled;
    }

private:
    ActivationFunction _act     = { ActivationLayerInfo::ActivationFunction::LOGISTIC };
    float              _a       = {};
    float              _b       = {};
    bool               _enabled = { false };
};

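// Illustrative usage sketch (not part of the original header): a ReLU6-style activation is
// expressed as BOUNDED_RELU with a = 6, i.e. f(x) = min(6, max(0, x)); the values are hypothetical.
//
//     ActivationLayerInfo relu6(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f);
//     ActivationLayerInfo none;      // default-constructed: enabled() returns false
//     bool active = relu6.enabled(); // true
//
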
1384/** Normalization Layer Information class */
1385class NormalizationLayerInfo
1386{
1387public:
1388 /** Default Constructor
1389 *
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001390 * @param[in] type The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NormType::CROSS_MAP
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001391 * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5.
Georgios Pinitas41caa622017-11-16 14:37:08 +00001392 * @param[in] alpha (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001.
1393 * @param[in] beta (Optional) Beta parameter used by normalization equation. Defaults to 0.5.
1394 * @param[in] kappa (Optional) Kappa parameter used by [Krichevksy 2012] Across Channel Local Brightness Normalization equation.
1395 * @param[in] is_scaled (Optional) Boolean that specifies if alpha will be scaled by the normalization size or not.
1396 * Should be false to follow [Krichevksy 2012].
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001397 */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001398 NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true)
1399 : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001400 {
1401 }
Alex Gildayc357c472018-03-21 13:54:09 +00001402 /** Get the normalization type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001403 NormType type() const
1404 {
1405 return _type;
1406 }
Alex Gildayc357c472018-03-21 13:54:09 +00001407 /** Get the normalization size */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001408 uint32_t norm_size() const
1409 {
1410 return _norm_size;
1411 }
Alex Gildayc357c472018-03-21 13:54:09 +00001412 /** Get the alpha value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001413 float alpha() const
1414 {
1415 return _alpha;
1416 }
Alex Gildayc357c472018-03-21 13:54:09 +00001417 /** Get the beta value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001418 float beta() const
1419 {
1420 return _beta;
1421 }
Alex Gildayc357c472018-03-21 13:54:09 +00001422 /** Get the kappa value */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001423 float kappa() const
1424 {
1425 return _kappa;
1426 }
Michele Di Giorgio9d3a8312018-11-20 12:31:24 +00001427 /** Get the is_scaled value */
1428 bool is_scaled() const
1429 {
1430 return _is_scaled;
1431 }
Alex Gildayc357c472018-03-21 13:54:09 +00001432 /** Check if normalization is cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001433 bool is_cross_map() const
1434 {
1435 return _type == NormType::CROSS_MAP;
1436 }
Alex Gildayc357c472018-03-21 13:54:09 +00001437 /** Check if normalization is not cross map */
Georgios Pinitas41caa622017-11-16 14:37:08 +00001438 bool is_in_map() const
1439 {
1440 return !is_cross_map();
1441 }
1442 /** Return the scaling factor of the normalization function.
1443 *
1444 * If is_scaled is set to false, [Krizhevsky 2012] normalization scaling is used and alpha is returned as-is; otherwise alpha is divided by the total number of elements used for the normalization.
1445 * where alpha is returned plainly, else alpha is scaled by the total number of elements used for the normalization.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001446 *
1447 * @return The normalization scaling factor.
1448 */
1449 float scale_coeff() const
1450 {
1451 const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001452 return (_is_scaled) ? (_alpha / size) : _alpha;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001453 }
1454
1455private:
1456 NormType _type;
1457 uint32_t _norm_size;
1458 float _alpha;
1459 float _beta;
1460 float _kappa;
Georgios Pinitas41caa622017-11-16 14:37:08 +00001461 bool _is_scaled;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001462};
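
/* Usage sketch (editor's addition, not part of the upstream header): shows how the
 * normalization scaling coefficient is derived for a 2D in-map normalization with
 * the default parameters documented above. */
inline float example_normalization_scale_coeff()
{
    const NormalizationLayerInfo info(NormType::IN_MAP_2D, 5);
    // With is_scaled = true (the default) and a 5x5 in-map window, scale_coeff()
    // returns alpha / (norm_size * norm_size) = 0.0001f / 25.
    return info.scale_coeff();
}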
1463
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001464/** Convolution Layer Weights Information class. This class stores the information needed to compute a convolution layer when the weights have already been reshaped */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001465class WeightsInfo
1466{
1467public:
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001468 /** Default constructor */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001469 WeightsInfo()
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001470 : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001471 {
1472 }
1473 /** Constructor
1474 *
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001475 * @param[in] are_reshaped True if the weights have been reshaped
1476 * @param[in] kernel_width Kernel width.
1477 * @param[in] kernel_height Kernel height.
1478 * @param[in] num_kernels Number of convolution kernels.
1479 * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001480 */
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001481 WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
1482 : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001483 {
1484 }
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001485 /** Flag which specifies if the weights tensor has been reshaped.
1486 *
1487 * @return True if the weights tensor has been reshaped
1488 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001489 bool are_reshaped() const
1490 {
1491 return _are_reshaped;
1492 };
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001493 /** Return the number of convolution kernels
1494 *
1495 * @return The number of convolution kernels
1496 */
1497 unsigned int num_kernels() const
1498 {
1499 return _num_kernels;
1500 };
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001501 /** Return the width and height of the kernel
1502 *
1503 * @return The width and height of the kernel
1504 */
1505 std::pair<unsigned int, unsigned int> kernel_size() const
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001506 {
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001507 return std::make_pair(_kernel_width, _kernel_height);
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001508 }
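 /** Flag which specifies if the internal reshaped weights must be retained (used for reconfiguration purposes)
 *
 * @return True if the internal reshaped weights must be retained
 */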
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001509 bool retain_internal_weights() const
1510 {
1511 return _retain_internal_weights;
1512 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001513
1514private:
1515 const bool _are_reshaped;
Gian Marco Iodice4e288692017-06-27 11:41:59 +01001516 const unsigned int _kernel_width;
1517 const unsigned int _kernel_height;
Gian Marco Iodice559d7712017-08-08 08:38:09 +01001518 const unsigned int _num_kernels;
Michele Di Giorgiob62280a2018-05-31 17:31:05 +01001519 const bool _retain_internal_weights;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001520};
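
/* Usage sketch (editor's addition, not part of the upstream header): describes a set
 * of 64 already-reshaped 3x3 convolution kernels; the kernel count and size are
 * illustrative values. */
inline std::pair<unsigned int, unsigned int> example_weights_info()
{
    const WeightsInfo weights_info(true /* are_reshaped */, 3, 3, 64);
    return weights_info.kernel_size(); // (3, 3)
}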
1521
Gian Marco36a0a462018-01-12 10:21:40 +00001522/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
1523 *
1524 * The matrix A can only be reshaped through @ref CLGEMMInterleave4x4Kernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
1525 * Note: mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block, can optionally be set only for @ref CLGEMMInterleave4x4Kernel
1526 *
1527 * The matrix B can only be reshaped through @ref CLGEMMTranspose1xWKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
1528 * Note: mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block, can optionally be set only for @ref CLGEMMTranspose1xWKernel
1529 *
1530 */
1531class GEMMReshapeInfo final
1532{
1533public:
1534 /** Default constructor */
1535 GEMMReshapeInfo()
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001536 : _m(1), _n(1), _k(1), _mult_transpose1xW_width(1), _mult_interleave4x4_height(1), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false)
Gian Marco36a0a462018-01-12 10:21:40 +00001537 {
1538 }
1539 /** Constructor
1540 *
1541 * @param[in] m Number of matrix A rows
1542 * @param[in] n Number of matrix B columns
1543 * @param[in] k Number of matrix A columns or matrix B rows
1544 * @param[in] mult_transpose1xW_width (Optional) Multiplication factor for the width of the 1xW transposed block
1545 * @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleaved block
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001546 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel.
1547 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001548 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1549 * to perform 1x1 convolutions with the NHWC data layout)
Gian Marco36a0a462018-01-12 10:21:40 +00001550 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001551 GEMMReshapeInfo(int m, int n, int k, int mult_transpose1xW_width = 1, int mult_interleave4x4_height = 1, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001552 : _m(m), _n(n), _k(k), _mult_transpose1xW_width(mult_transpose1xW_width), _mult_interleave4x4_height(mult_interleave4x4_height), _depth_output_gemm3d(depth_output_gemm3d),
1553 _reinterpret_input_as_3d(reinterpret_input_as_3d)
Gian Marco36a0a462018-01-12 10:21:40 +00001554 {
1555 }
1556 /** Number of matrix A rows
1557 *
1558 * @return the number of matrix A rows
1559 */
1560 int m() const
1561 {
1562 return _m;
1563 }
1564 /** Number of matrix B columns
1565 *
1566 * @return the number of matrix B columns
1567 */
1568 int n() const
1569 {
1570 return _n;
1571 }
1572 /** Number of matrix A columns or matrix B rows
1573 *
1574 * @return the number of matrix A columns or matrix B rows
1575 */
1576 int k() const
1577 {
1578 return _k;
1579 }
1580 /** Multiplication factor for the width of the 1xW transposed block
1581 *
1582 * @return the multiplication factor for the width of the 1xW transposed block
1583 */
1584 int mult_transpose1xW_width() const
1585 {
1586 return _mult_transpose1xW_width;
1587 }
1588 /** Multiplication factor for the height of the 4x4 interleaved block
1589 *
1590 * @return the multiplication factor for the height of the 4x4 interleaved block
1591 */
1592 int mult_interleave4x4_height() const
1593 {
1594 return _mult_interleave4x4_height;
1595 }
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001596 /** Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
1597 *
1598 * @note The GEMM3D kernel is used when the output has to be reinterpreted as a 3D tensor. In that case:
1599 * m = depth_output_gemm3d * output_height
1600 *
1601 * @return the depth of the output tensor to be used with the GEMM3D kernel
1602 */
1603 int depth_output_gemm3d() const
1604 {
1605 return _depth_output_gemm3d;
1606 }
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001607 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1608 *
1609 * @return True if the input tensor has to be reinterpreted as 3D tensor
1610 */
1611 bool reinterpret_input_as_3d() const
1612 {
1613 return _reinterpret_input_as_3d;
1614 };
Gian Marco36a0a462018-01-12 10:21:40 +00001615
1616private:
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001617 const int _m;
1618 const int _n;
1619 const int _k;
1620 const int _mult_transpose1xW_width;
1621 const int _mult_interleave4x4_height;
1622 const int _depth_output_gemm3d;
1623 const bool _reinterpret_input_as_3d;
Gian Marco36a0a462018-01-12 10:21:40 +00001624};
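
/* Usage sketch (editor's addition, not part of the upstream header): reshape
 * information for a GEMM with M = 24, N = 16 and K = 8 and the default
 * multiplication factors; the dimensions are illustrative values. */
inline int example_gemm_reshape_info()
{
    const GEMMReshapeInfo reshape_info(24, 16, 8);
    // No 3D reinterpretation was requested, so the depth defaults to 0.
    return reshape_info.depth_output_gemm3d();
}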
1625
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001626/** GEMMLowp output stage type */
1627enum class GEMMLowpOutputStageType
1628{
1629 NONE, /**< No quantization to uint8 */
1630 QUANTIZE_DOWN, /**< Quantize to uint8 using an integer multiplication */
1631 QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
1632 QUANTIZE_DOWN_FLOAT /**< Quantize to uint8 using a floating point multiplication */
1633};
1634
1635/** GEMMLowp output stage info */
1636struct GEMMLowpOutputStageInfo
1637{
1638 GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
1639 int gemmlowp_offset{ 0 }; /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
1640 int gemmlowp_multiplier{ 0 }; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
1641 int gemmlowp_shift{ 0 }; /**< GEMMLowp output stage shift used for quantizing to uint8 */
1642 int gemmlowp_min_bound{ 0 }; /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
1643 int gemmlowp_max_bound{ 0 }; /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
1644};
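
/* Usage sketch (editor's addition, not part of the upstream header): fills the output
 * stage descriptor for a fixed-point requantization to QASYMM8. The multiplier, shift
 * and saturation bounds are illustrative values, not derived from a real quantization. */
inline GEMMLowpOutputStageInfo example_gemmlowp_output_stage()
{
    GEMMLowpOutputStageInfo info{};
    info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    info.gemmlowp_offset     = 2;
    info.gemmlowp_multiplier = 1073741824; // example fixed-point multiplier (2^30)
    info.gemmlowp_shift      = 8;
    info.gemmlowp_min_bound  = 0;
    info.gemmlowp_max_bound  = 255;
    return info;
}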
1645
Gian Marco36a0a462018-01-12 10:21:40 +00001646/** GEMM information class. This class stores the necessary information to compute GEMM functions
1647 *
1648 * This object also contains the information about how matrix A and matrix B have been reshaped
1649 *
1650 */
Chunosov5124be52017-11-22 20:42:13 +07001651class GEMMInfo
1652{
1653public:
1654 /** Default constructor */
1655 GEMMInfo()
Anthony Barbier08a45172018-11-30 17:20:26 +00001656 : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(true), _depth_output_gemm3d(0), _reinterpret_input_as_3d(false), _retain_internal_weights(false), _gemmlowp_output_stage(),
1657 _fp_mixed_precision(false)
Chunosov5124be52017-11-22 20:42:13 +07001658 {
1659 }
1660 /** Constructor
1661 *
1662 * @param[in] is_a_reshaped True if the matrix A has been reshaped
1663 * @param[in] is_b_reshaped True if the matrix B has been reshaped
1664 * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001665 * @param[in] depth_output_gemm3d (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001666 * If 0 the output will not be reinterpreted as 3D. Default 0
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001667 * @param[in] reinterpret_input_as_3d (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
1668 * to perform 1x1 convolutions with the NHWC data layout)
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001669 * @param[in] retain_internal_weights (Optional) Retain the weights tensor from previous run
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001670 * @param[in] gemmlowp_output_stage (Optional) GEMMLowp Output stage info
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001671 * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001672 *
Chunosov5124be52017-11-22 20:42:13 +07001673 */
Gian Marco Iodice3139f032018-11-05 14:26:32 +00001674 GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001675 GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false)
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001676 : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run), _depth_output_gemm3d(depth_output_gemm3d),
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001677 _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights), _gemmlowp_output_stage(gemmlowp_output_stage), _fp_mixed_precision(fp_mixed_precision)
Chunosov5124be52017-11-22 20:42:13 +07001678 {
1679 }
1680 /** Flag which specifies if the matrix A has been reshaped
1681 *
1682 * @return True if the matrix A has been reshaped
1683 */
1684 bool is_a_reshaped() const
1685 {
1686 return _is_a_reshaped;
1687 };
1688 /** Flag which specifies if the matrix B has been reshaped
1689 *
1690 * @return True if the matrix B has been reshaped
1691 */
1692 bool is_b_reshaped() const
1693 {
1694 return _is_b_reshaped;
1695 };
1696 /** Flag which specifies if the reshape of matrix B should be executed only for the first run
1697 *
1698 * @note This flag could be set to TRUE when GEMM is used to accelerate a convolution layer
1699 *
1700 * @return True if the reshape of matrix B happens only for the first run
1701 */
1702 bool reshape_b_only_on_first_run() const
1703 {
1704 return _reshape_b_only_on_first_run;
1705 };
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001706 /** Depth of the output when GEMM output is reinterpreted as 3D tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001707 *
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001708 * @return the depth of the output tensor
Gian Marco36a0a462018-01-12 10:21:40 +00001709 */
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001710 int depth_output_gemm3d() const
Gian Marco36a0a462018-01-12 10:21:40 +00001711 {
Isabella Gottardi8e74f442018-03-01 16:42:00 +00001712 return _depth_output_gemm3d;
1713 };
Gian Marco Iodice68a3f562018-07-26 11:44:03 +01001714 /** Flag which specifies if the input tensor has to be reinterpreted as 3D
1715 *
1716 * @return True if the input tensor has to be reinterpreted as 3D tensor
1717 */
1718 bool reinterpret_input_as_3d() const
1719 {
1720 return _reinterpret_input_as_3d;
1721 };
Michele Di Giorgioba1ffe92018-08-22 14:28:30 +01001722 /** Flag which specifies if the weights tensor has to be retained from previous run
1723 *
1724 * @return True if the weights tensor has to be retained
1725 */
1726 bool retain_internal_weights() const
1727 {
1728 return _retain_internal_weights;
1729 };
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001730 /** GEMMLowp output stage
1731 *
1732 * @return the GEMMLowp output stage info
1733 */
1734 GEMMLowpOutputStageInfo gemmlowp_output_stage() const
1735 {
1736 return _gemmlowp_output_stage;
1737 };
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001738 /** Flag which specifies if a wider accumulator should be used.
1739 *
1740 * @return True if a wider accumulator has to be used
1741 */
1742 bool fp_mixed_precision() const
1743 {
1744 return _fp_mixed_precision;
1745 };
Chunosov5124be52017-11-22 20:42:13 +07001746
1747private:
Gian Marco Iodice4b908652018-10-18 10:21:02 +01001748 const bool _is_a_reshaped;
1749 const bool _is_b_reshaped;
1750 const bool _reshape_b_only_on_first_run;
1751 const int _depth_output_gemm3d;
1752 const bool _reinterpret_input_as_3d;
1753 const bool _retain_internal_weights;
1754 const GEMMLowpOutputStageInfo _gemmlowp_output_stage;
Vidhya Sudhan Loganathana25d16c2018-11-16 11:33:12 +00001755 const bool _fp_mixed_precision;
Chunosov5124be52017-11-22 20:42:13 +07001756};
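
/* Usage sketch (editor's addition, not part of the upstream header): GEMM metadata for
 * the common convolution case where neither input matrix is pre-reshaped and matrix B
 * (the weights) is reshaped once and reused on every subsequent run. */
inline bool example_gemm_info()
{
    const GEMMInfo gemm_info(false /* is_a_reshaped */, false /* is_b_reshaped */, true /* reshape_b_only_on_first_run */);
    return gemm_info.reshape_b_only_on_first_run();
}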
1757
Gian Marco Iodice247f52c2018-03-22 11:24:56 +00001758/** Winograd information */
1759struct WinogradInfo
1760{
1761 /** Constructor
1762 *
1763 * @param[in] output_tile_sz Width and height of the output tile
1764 * @param[in] kernel_sz Width and height of the kernel
1765 * @param[in] input_dims Width and height of the input tensor before the convolution is applied
1766 * @param[in] conv_info Convolution info (Pads, strides)
1767 * @param[in] data_layout Data layout to use for the output tensor once the convolution has been applied
1768 */
1769 WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
1770 : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
1771 {
1772 }
1773
1774 Size2D output_tile_size{}; /**< Width and height of the output tile */
1775 Size2D kernel_size{}; /**< Width and height of the kernel*/
1776 Size2D input_dimensions{}; /**< Width and height of the input tensor before the convolution is applied */
1777 PadStrideInfo convolution_info{}; /**< Convolution info (Pads, strides,...) */
1778 DataLayout output_data_layout{ DataLayout::NCHW }; /**< Data layout to use for the output tensor once the convolution has been applied (NCHW or NHWC) */
1779};
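
/* Usage sketch (editor's addition, not part of the upstream header): Winograd
 * F(4x4, 3x3) configuration for a 224x224 input with unit stride, no padding and an
 * NCHW output. The (stride_x, stride_y, pad_x, pad_y) PadStrideInfo constructor is
 * assumed from its use elsewhere in this header; all dimensions are illustrative. */
inline WinogradInfo example_winograd_info()
{
    return WinogradInfo(Size2D(4U, 4U), Size2D(3U, 3U), Size2D(224U, 224U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW);
}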
1780
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001781/** IO formatting information class*/
1782struct IOFormatInfo
1783{
1784 /** Precision type used when printing floating point numbers */
1785 enum class PrecisionType
1786 {
1787 Default, /**< Default precision to the one that the current stream has */
1788 Custom, /**< Custom precision specified by the user using the precision parameter */
1789 Full /**< The maximum precision of the floating point representation */
1790 };
1791
1792 /** Specifies the area to be printed, used by Tensor objects */
1793 enum class PrintRegion
1794 {
1795 ValidRegion, /**< Prints the valid region of the Tensor object */
1796 NoPadding, /**< Prints the Tensor object without the padding */
1797 Full /**< Print the tensor object including padding */
1798 };
1799
Alex Gildayc357c472018-03-21 13:54:09 +00001800 /** Construct a set of IO formatting information.
1801 *
1802 * @param[in] print_region Area to be printed. Used by Tensor objects. Default: ValidRegion.
1803 * @param[in] precision_type Precision type for floating point numbers. Default: stream default.
1804 * @param[in] precision Precision value for floating point numbers. Default: 10.
1805 * @param[in] align_columns Whether to align columns when printed. Default: true.
1806 * @param[in] element_delim Delimiter between elements. Default: " ".
1807 * @param[in] row_delim Delimiter between rows. Default: "\n".
1808 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001809 IOFormatInfo(PrintRegion print_region = PrintRegion::ValidRegion,
1810 PrecisionType precision_type = PrecisionType::Default,
1811 unsigned int precision = 10,
1812 bool align_columns = true,
1813 std::string element_delim = " ",
1814 std::string row_delim = "\n")
1815 : print_region(print_region),
1816 precision_type(precision_type),
1817 precision(precision),
1818 element_delim(element_delim),
1819 row_delim(row_delim),
1820 align_columns(align_columns)
1821 {
1822 }
1823
Alex Gildayc357c472018-03-21 13:54:09 +00001824 /** Area to be printed by Tensor objects */
1825 PrintRegion print_region;
1826 /** Floating point precision type */
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001827 PrecisionType precision_type;
Alex Gildayc357c472018-03-21 13:54:09 +00001828 /** Floating point precision */
1829 unsigned int precision;
1830 /** Element delimiter */
1831 std::string element_delim;
1832 /** Row delimiter */
1833 std::string row_delim;
1834 /** Align columns */
1835 bool align_columns;
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001836};
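
/* Usage sketch (editor's addition, not part of the upstream header): formatting
 * information that prints the whole tensor, padding included, at full floating point
 * precision with comma-separated elements. */
inline IOFormatInfo example_io_format_info()
{
    return IOFormatInfo(IOFormatInfo::PrintRegion::Full, IOFormatInfo::PrecisionType::Full, 10, true, ", ", "\n");
}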
Georgios Pinitasd8734b52017-12-22 15:27:52 +00001837} // namespace arm_compute
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001838#endif /* __ARM_COMPUTE_TYPES_H__ */