/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TYPES_H__
#define __ARM_COMPUTE_TYPES_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/QAsymm8.h"
#include "arm_compute/core/Rounding.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "support/Half.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

namespace arm_compute
{
/** 16-bit floating point type */
using half = half_float::half;

/** Permutation vector */
using PermutationVector = Strides;
/** Bidirectional strides */
using BiStrides = Coordinates;

/** Image colour formats */
enum class Format
{
    UNKNOWN,  /**< Unknown image format */
    U8,       /**< 1 channel, 1 U8 per channel */
    S16,      /**< 1 channel, 1 S16 per channel */
    U16,      /**< 1 channel, 1 U16 per channel */
    S32,      /**< 1 channel, 1 S32 per channel */
    U32,      /**< 1 channel, 1 U32 per channel */
    F16,      /**< 1 channel, 1 F16 per channel */
    F32,      /**< 1 channel, 1 F32 per channel */
    UV88,     /**< 2 channels, 1 U8 per channel */
    RGB888,   /**< 3 channels, 1 U8 per channel */
    RGBA8888, /**< 4 channels, 1 U8 per channel */
    YUV444,   /**< 3 planes of 8-bit 4:4:4 sampled Y, U, V */
    YUYV422,  /**< A single plane of 32-bit macro pixels of Y0, U0, Y1, V0 bytes */
    NV12,     /**< A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling */
    NV21,     /**< A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling */
    IYUV,     /**< 3 planes of 8-bit 4:2:0 sampled Y, U, V */
    UYVY422   /**< A single plane of 32-bit macro pixels of U0, Y0, V0, Y1 bytes */
};

/** Available data types */
enum class DataType
{
    UNKNOWN, /**< Unknown data type */
    U8,      /**< unsigned 8-bit number */
    S8,      /**< signed 8-bit number */
    QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number */
    U16,     /**< unsigned 16-bit number */
    S16,     /**< signed 16-bit number */
    U32,     /**< unsigned 32-bit number */
    S32,     /**< signed 32-bit number */
    U64,     /**< unsigned 64-bit number */
    S64,     /**< signed 64-bit number */
    F16,     /**< 16-bit floating-point number */
    F32,     /**< 32-bit floating-point number */
    F64,     /**< 64-bit floating-point number */
    SIZET    /**< size_t */
};

/** Available Sampling Policies */
enum class SamplingPolicy
{
    CENTER,  /**< Samples are taken at pixel center */
    TOP_LEFT /**< Samples are taken at pixel top left corner */
};

/** Constant value of the border pixels when using BorderMode::CONSTANT */
constexpr uint8_t CONSTANT_BORDER_VALUE = 199;

/** Constant value used to indicate a half-scale pyramid */
constexpr float SCALE_PYRAMID_HALF = 0.5f;

/** Constant value used to indicate an ORB scaled pyramid */
constexpr float SCALE_PYRAMID_ORB = 8.408964152537146130583778358414e-01;

/** Supported tensor data layouts */
enum class DataLayout
{
    UNKNOWN, /**< Unknown data layout */
    NCHW,    /**< Num samples, channels, height, width */
    NHWC     /**< Num samples, height, width, channels */
};

/** Supported tensor data layout dimensions */
enum class DataLayoutDimension
{
    CHANNEL, /**< channel */
    HEIGHT,  /**< height */
    WIDTH,   /**< width */
    BATCHES  /**< batches */
};

/** Quantization settings (used for QASYMM8 data type) */
struct QuantizationInfo
{
    /** Default constructor */
    QuantizationInfo() noexcept
        : scale(0.0f),
          offset(0)
    {
    }

    /** Construct quantization info.
     *
     * @param[in] scale  Scale.
     * @param[in] offset Offset.
     */
    QuantizationInfo(float scale, int offset)
        : scale(scale), offset(offset)
    {
    }

    /** Check whether equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is the same.
     */
    bool operator==(const QuantizationInfo &other) const
    {
        return scale == other.scale && offset == other.offset;
    }

    /** Check whether not equal to a given quantization info.
     *
     * @param[in] other Other quantization info.
     *
     * @return True if the given quantization info is not the same.
     */
    bool operator!=(const QuantizationInfo &other) const
    {
        return !(*this == other);
    }

    float scale;  /**< scale */
    int   offset; /**< offset */

    /** Quantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value           Value to quantize.
     * @param[in] rounding_policy Policy to use when rounding.
     *
     * @return the quantized value.
     */
    qasymm8_t quantize(float value, RoundingPolicy rounding_policy) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::quantize: scale == 0");
        return sqcvt_qasymm8_f32(value, scale, offset, rounding_policy);
    }

    /** Dequantizes a value using the scale/offset in this QuantizationInfo
     *
     * @param[in] value Value to dequantize.
     *
     * @return the original value before quantization.
     */
    float dequantize(qasymm8_t value) const
    {
        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::dequantize: scale == 0");
        return scvt_f32_qasymm8(value, scale, offset);
    }

    /** Indicates whether this QuantizationInfo has valid settings or not
     *
     * @return True if this QuantizationInfo is empty (i.e. does not hold valid settings).
     */
    bool empty() const
    {
        return scale == 0;
    }
};
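
// Illustrative usage sketch only (not part of the API). The values are arbitrary,
// and RoundingPolicy::TO_NEAREST_UP is assumed to be provided by Rounding.h:
//
//   QuantizationInfo qinfo(0.5f, 10);
//   qasymm8_t q = qinfo.quantize(2.0f, RoundingPolicy::TO_NEAREST_UP); // round(2.0 / 0.5) + 10 = 14
//   float     f = qinfo.dequantize(q);                                 // (14 - 10) * 0.5 = 2.0f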

/** Container for valid region of a window */
struct ValidRegion
{
    /** Default constructor */
    ValidRegion()
        : anchor{}, shape{}
    {
    }

    /** Allow instances of this class to be copy constructed */
    ValidRegion(const ValidRegion &) = default;
    /** Allow instances of this class to be move constructed */
    ValidRegion(ValidRegion &&) = default;
    /** Allow instances of this class to be copied */
    ValidRegion &operator=(const ValidRegion &) = default;
    /** Allow instances of this class to be moved */
    ValidRegion &operator=(ValidRegion &&) = default;
    /** Default destructor */
    ~ValidRegion() = default;

    /** Constructor for a valid region with default number of dimensions
     *
     * @param[in] an_anchor Anchor for the start of the valid region.
     * @param[in] a_shape   Shape of the valid region.
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
    }

    /** Constructor for a valid region with specified number of dimensions
     *
     * @param[in] an_anchor      Anchor for the start of the valid region.
     * @param[in] a_shape        Shape of the valid region.
     * @param[in] num_dimensions Number of dimensions (must be >= number of dimensions of anchor and shape).
     *
     */
    ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
        : anchor{ an_anchor }, shape{ a_shape }
    {
        ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
        anchor.set_num_dimensions(num_dimensions);
    }

    /** Return the start of the valid region for the given dimension @p d */
    int start(unsigned int d) const
    {
        return anchor[d];
    }

    /** Return the end of the valid region for the given dimension @p d */
    int end(unsigned int d) const
    {
        return anchor[d] + shape[d];
    }

    /** Accessor to set the value of anchor and shape for one of the dimensions.
     *
     * @param[in] dimension Dimension for which the value is set.
     * @param[in] start     Value to be set in anchor for the dimension.
     * @param[in] size      Value to be set in shape for the dimension.
     *
     * @return *this.
     */
    ValidRegion &set(size_t dimension, int start, size_t size)
    {
        anchor.set(dimension, start);
        shape.set(dimension, size);
        return *this;
    }

    Coordinates anchor; /**< Anchor for the start of the valid region. */
    TensorShape shape;  /**< Shape of the valid region. */
};
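
// Illustrative usage sketch (hypothetical values): describe a 120x80 valid area
// starting at (4, 4) inside a padded 2D tensor.
//
//   ValidRegion region(Coordinates(4, 4), TensorShape(120U, 80U));
//   int x_end = region.end(0); // 4 + 120 = 124
//   region.set(1, 0, 88);      // dimension 1 now starts at 0 with size 88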

/** Methods available to handle borders */
enum class BorderMode
{
    UNDEFINED, /**< Borders are left undefined */
    CONSTANT,  /**< Pixels outside the image are assumed to have a constant value */
    REPLICATE  /**< Pixels outside the image are assumed to have the same value as the closest image pixel */
};

/** Container for 2D border size */
struct BorderSize
{
    /** Empty border, i.e. no border */
    constexpr BorderSize()
        : top{ 0 }, right{ 0 }, bottom{ 0 }, left{ 0 }
    {
    }

    /** Border with equal size around the 2D plane */
    explicit constexpr BorderSize(unsigned int size)
        : top{ size }, right{ size }, bottom{ size }, left{ size }
    {
    }

    /** Border with same size for top/bottom and left/right */
    constexpr BorderSize(unsigned int top_bottom, unsigned int left_right)
        : top{ top_bottom }, right{ left_right }, bottom{ top_bottom }, left{ left_right }
    {
    }

    /** Border with different sizes */
    constexpr BorderSize(unsigned int top, unsigned int right, unsigned int bottom, unsigned int left)
        : top{ top }, right{ right }, bottom{ bottom }, left{ left }
    {
    }

    /** Check if the entire border is zero */
    constexpr bool empty() const
    {
        return top == 0 && right == 0 && bottom == 0 && left == 0;
    }

    /** Check if the border is the same size on all sides */
    constexpr bool uniform() const
    {
        return top == right && top == bottom && top == left;
    }

    /** Scale this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return *this.
     */
    BorderSize &operator*=(float scale)
    {
        top *= scale;
        right *= scale;
        bottom *= scale;
        left *= scale;

        return *this;
    }

    /** Scale a copy of this border size.
     *
     * @param[in] scale Scale to multiply border size by.
     *
     * @return a scaled copy of this.
     */
    BorderSize operator*(float scale)
    {
        BorderSize size = *this;
        size *= scale;

        return size;
    }

    /** Limit this border size.
     *
     * @param[in] limit Border size to limit this border size to.
     */
    void limit(const BorderSize &limit)
    {
        top    = std::min(top, limit.top);
        right  = std::min(right, limit.right);
        bottom = std::min(bottom, limit.bottom);
        left   = std::min(left, limit.left);
    }

    unsigned int top;    /**< top of the border */
    unsigned int right;  /**< right of the border */
    unsigned int bottom; /**< bottom of the border */
    unsigned int left;   /**< left of the border */
};
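
// Illustrative usage sketch (hypothetical values):
//
//   BorderSize border(1, 2);        // top/bottom = 1, left/right = 2
//   BorderSize doubled = border * 2.0f;
//   doubled.limit(BorderSize(3));   // clamp every side to at most 3
//   bool same = doubled.uniform();  // false: top/bottom = 2, left/right = 3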

/** Container for 2D padding size */
using PaddingSize = BorderSize;

/** Policy to handle overflow */
enum class ConvertPolicy
{
    WRAP,    /**< Wrap around */
    SATURATE /**< Saturate */
};

/** Interpolation method */
enum class InterpolationPolicy
{
    NEAREST_NEIGHBOR, /**< Output values are defined to match the source pixel whose center is nearest to the sample position */
    BILINEAR,         /**< Output values are defined by bilinear interpolation between the pixels */
    AREA,             /**< Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image */
};

/** Bilinear Interpolation method used by LKTracker */
enum class BilinearInterpolation
{
    BILINEAR_OLD_NEW, /**< Old-new method */
    BILINEAR_SCHARR   /**< Scharr method */
};

/** Threshold mode */
enum class ThresholdType
{
    BINARY, /**< Threshold with one value */
    RANGE   /**< Threshold with two values */
};

/** Termination criteria */
enum class Termination
{
    TERM_CRITERIA_EPSILON,    /**< Terminate when within epsilon of a threshold */
    TERM_CRITERIA_ITERATIONS, /**< Terminate after a maximum number of iterations */
    TERM_CRITERIA_BOTH        /**< Terminate on whichever of the other conditions occurs first */
};

/** Magnitude calculation type. */
enum class MagnitudeType
{
    L1NORM, /**< L1 normalization type */
    L2NORM  /**< L2 normalization type */
};

/** Phase calculation type.
 *
 * @note When PhaseType == SIGNED, each angle is mapped to the range 0 to 255 inclusive; otherwise angles are between 0 and 180.
 */
enum class PhaseType
{
    SIGNED,  /**< Angle range: [0, 360] */
    UNSIGNED /**< Angle range: [0, 180] */
};

/** Keypoint type */
struct KeyPoint
{
    int32_t x{ 0 };               /**< X coordinates */
    int32_t y{ 0 };               /**< Y coordinates */
    float   strength{ 0.f };      /**< Strength of the point */
    float   scale{ 0.f };         /**< Scale initialized to 0 by the corner detector */
    float   orientation{ 0.f };   /**< Orientation initialized to 0 by the corner detector */
    int32_t tracking_status{ 0 }; /**< Status initialized to 1 by the corner detector, set to 0 when the point is lost */
    float   error{ 0.f };         /**< Tracking error initialized to 0 by the corner detector */
};

/** Internal key point */
using InternalKeypoint = std::tuple<float, float, float>; /* x,y,strength */

/** Rectangle type */
struct Rectangle
{
    uint16_t x;      /**< Top-left x coordinate */
    uint16_t y;      /**< Top-left y coordinate */
    uint16_t width;  /**< Width of the rectangle */
    uint16_t height; /**< Height of the rectangle */
};

/** Coordinate type */
struct Coordinates2D
{
    int32_t x; /**< X coordinates */
    int32_t y; /**< Y coordinates */
};

/** Coordinate type */
struct Coordinates3D
{
    uint32_t x; /**< X coordinates */
    uint32_t y; /**< Y coordinates */
    uint32_t z; /**< Z coordinates */
};

/** Padding information as a pair of unsigned int start/end */
using PaddingInfo = std::pair<uint32_t, uint32_t>;

/** List of padding information */
using PaddingList = std::vector<PaddingInfo>;

/** Region of interest */
struct ROI
{
    Rectangle rect;      /**< Rectangle specifying the region of interest */
    uint16_t  batch_idx; /**< The batch index of the region of interest */
};

/** Available channels */
enum class Channel
{
    UNKNOWN, /**< Unknown channel format */
    C0,      /**< First channel (used by formats with unknown channel types). */
    C1,      /**< Second channel (used by formats with unknown channel types). */
    C2,      /**< Third channel (used by formats with unknown channel types). */
    C3,      /**< Fourth channel (used by formats with unknown channel types). */
    R,       /**< Red channel. */
    G,       /**< Green channel. */
    B,       /**< Blue channel. */
    A,       /**< Alpha channel. */
    Y,       /**< Luma channel. */
    U,       /**< Cb/U channel. */
    V        /**< Cr/V/Value channel. */
};

/** Available matrix patterns */
enum class MatrixPattern
{
    BOX,   /**< Box pattern matrix. */
    CROSS, /**< Cross pattern matrix. */
    DISK,  /**< Disk pattern matrix. */
    OTHER  /**< Any other matrix pattern. */
};

/** Available non-linear functions. */
enum class NonLinearFilterFunction : unsigned
{
    MEDIAN = 0, /**< Non-linear median filter. */
    MIN    = 1, /**< Non-linear erode. */
    MAX    = 2, /**< Non-linear dilate. */
};

/** Available reduction operations */
enum class ReductionOperation
{
    SUM_SQUARE, /**< Sum of squares */
    SUM,        /**< Sum */
    MEAN_SUM,   /**< Mean of sum */
};

/** The normalization type used for the normalization layer */
enum class NormType
{
    IN_MAP_1D, /**< Normalization applied within the same map in 1D region */
    IN_MAP_2D, /**< Normalization applied within the same map in 2D region */
    CROSS_MAP  /**< Normalization applied across maps */
};

/** Normalization type for Histogram of Oriented Gradients (HOG) */
enum class HOGNormType
{
    L2_NORM    = 1, /**< L2-norm */
    L2HYS_NORM = 2, /**< L2-norm followed by clipping */
    L1_NORM    = 3  /**< L1 norm */
};

/** Detection window used for the object detection. The detection window keeps the following information:
 *
 * -# Geometry of the rectangular window (x/y of top-left corner and width/height)
 * -# Index of the class used for evaluating which class the detection window belongs to
 * -# Confidence value (score) obtained with the classifier
 */
struct DetectionWindow
{
    uint16_t x{ 0 };         /**< Top-left x coordinate */
    uint16_t y{ 0 };         /**< Top-left y coordinate */
    uint16_t width{ 0 };     /**< Width of the detection window */
    uint16_t height{ 0 };    /**< Height of the detection window */
    uint16_t idx_class{ 0 }; /**< Index of the class */
    float    score{ 0.f };   /**< Confidence value for the detection window */
};

/** Dimension rounding type when down-scaling on CNNs
 * @note Used in pooling and convolution layer
 */
enum class DimensionRoundingType
{
    FLOOR, /**< Floor rounding */
    CEIL   /**< Ceil rounding */
};

/** Available pooling types */
enum class PoolingType
{
    MAX, /**< Max Pooling */
    AVG, /**< Average Pooling */
    L2   /**< L2 Pooling */
};

/** Padding and stride information class */
class PadStrideInfo
{
public:
    /** Constructor
     *
     * @param[in] stride_x (Optional) Stride, in elements, across x. Defaults to 1.
     * @param[in] stride_y (Optional) Stride, in elements, across y. Defaults to 1.
     * @param[in] pad_x    (Optional) Padding, in elements, across x. Defaults to 0.
     * @param[in] pad_y    (Optional) Padding, in elements, across y. Defaults to 0.
     * @param[in] round    (Optional) Dimensions rounding. Defaults to @ref FLOOR.
     */
    PadStrideInfo(unsigned int stride_x = 1, unsigned int stride_y = 1,
                  unsigned int pad_x = 0, unsigned int pad_y = 0,
                  DimensionRoundingType round = DimensionRoundingType::FLOOR)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_x),
          _pad_top(pad_y),
          _pad_right(pad_x),
          _pad_bottom(pad_y),
          _round_type(round)
    {
    }
    /** Constructor
     *
     * @param[in] stride_x   Stride, in elements, across x.
     * @param[in] stride_y   Stride, in elements, across y.
     * @param[in] pad_left   Padding across x on the left, in elements.
     * @param[in] pad_right  Padding across x on the right, in elements.
     * @param[in] pad_top    Padding across y on the top, in elements.
     * @param[in] pad_bottom Padding across y on the bottom, in elements.
     * @param[in] round      Dimensions rounding.
     */
    PadStrideInfo(unsigned int stride_x, unsigned int stride_y,
                  unsigned int pad_left, unsigned int pad_right,
                  unsigned int pad_top, unsigned int pad_bottom,
                  DimensionRoundingType round)
        : _stride(std::make_pair(stride_x, stride_y)),
          _pad_left(pad_left),
          _pad_top(pad_top),
          _pad_right(pad_right),
          _pad_bottom(pad_bottom),
          _round_type(round)
    {
    }
    /** Get the stride.
     *
     * @return a pair: stride x, stride y.
     */
    std::pair<unsigned int, unsigned int> stride() const
    {
        return _stride;
    }
    /** Check whether the padding is symmetric.
     *
     * @return True if the padding is symmetric.
     */
    bool padding_is_symmetric() const
    {
        return (_pad_left == _pad_right) && (_pad_top == _pad_bottom);
    }
    /** Get the padding.
     *
     * @note This should only be used when the padding is symmetric.
     *
     * @return a pair: padding left/right, padding top/bottom
     */
    std::pair<unsigned int, unsigned int> pad() const
    {
        // This accessor should be used only when padding is symmetric
        ARM_COMPUTE_ERROR_ON(!padding_is_symmetric());
        return std::make_pair(_pad_left, _pad_top);
    }

    /** Get the left padding */
    unsigned int pad_left() const
    {
        return _pad_left;
    }
    /** Get the right padding */
    unsigned int pad_right() const
    {
        return _pad_right;
    }
    /** Get the top padding */
    unsigned int pad_top() const
    {
        return _pad_top;
    }
    /** Get the bottom padding */
    unsigned int pad_bottom() const
    {
        return _pad_bottom;
    }

    /** Get the rounding type */
    DimensionRoundingType round() const
    {
        return _round_type;
    }

    /** Check whether this has any padding */
    bool has_padding() const
    {
        return (_pad_left != 0 || _pad_top != 0 || _pad_right != 0 || _pad_bottom != 0);
    }

private:
    std::pair<unsigned int, unsigned int> _stride;
    unsigned int                          _pad_left;
    unsigned int                          _pad_top;
    unsigned int                          _pad_right;
    unsigned int                          _pad_bottom;

    DimensionRoundingType _round_type;
};
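
// Illustrative usage sketch (hypothetical values): a convolution with stride 2
// and asymmetric "SAME"-style padding.
//
//   PadStrideInfo same_pad(2, 2, /* left */ 0, /* right */ 1, /* top */ 0, /* bottom */ 1,
//                          DimensionRoundingType::FLOOR);
//   bool symmetric = same_pad.padding_is_symmetric(); // false
//   auto strides   = same_pad.stride();               // {2, 2}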

/** Fully connected layer info */
struct FullyConnectedLayerInfo
{
    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */

    /** Sets the weights trained data layout
     *
     * @param[in] layout Data layout that the weights were trained with
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_weights_trained_layout(DataLayout layout)
    {
        weights_trained_layout = layout;
        return *this;
    }
    /** Sets the transpose weights flag
     *
     * @param[in] should_transpose_weights Boolean flag indicating if weights should be transposed
     *
     * @return Updated object
     */
    FullyConnectedLayerInfo &set_transpose_weights(bool should_transpose_weights)
    {
        transpose_weights = should_transpose_weights;
        return *this;
    }
};
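
// Illustrative usage sketch: both setters return *this, so they can be chained
// (hypothetical values).
//
//   FullyConnectedLayerInfo fc_info;
//   fc_info.set_weights_trained_layout(DataLayout::NHWC).set_transpose_weights(false);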

/** Pooling Layer Information class */
class PoolingLayerInfo
{
public:
    /** Default Constructor */
    PoolingLayerInfo()
        : _pool_type(PoolingType::MAX), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              unsigned int  pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(Size2D(pool_size, pool_size)), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @param[in] pool_type       Pooling type @ref PoolingType.
     * @param[in] pool_size       Pooling size, in elements, across x and y.
     * @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
     * @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
     *                            True will exclude padding while false will not (used in AVG/L2 pooling to determine the pooling area).
     *                            Defaults to false.
     */
    explicit PoolingLayerInfo(PoolingType   pool_type,
                              Size2D        pool_size,
                              PadStrideInfo pad_stride_info = PadStrideInfo(),
                              bool          exclude_padding = false)
        : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
    {
    }
    /** Constructor
     *
     * @note This constructor is used for global pooling
     *
     * @param[in] pool_type Pooling type @ref PoolingType.
     */
    explicit PoolingLayerInfo(PoolingType pool_type)
        : _pool_type(pool_type), _pool_size(Size2D()), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
    {
    }
    /** Get the pooling type */
    PoolingType pool_type() const
    {
        return _pool_type;
    }
    /** Get the pooling size */
    const Size2D &pool_size() const
    {
        return _pool_size;
    }
    /** Get the padding and stride */
    PadStrideInfo pad_stride_info() const
    {
        return _pad_stride_info;
    }
    /** Check if padding is excluded in calculations */
    bool exclude_padding() const
    {
        return _exclude_padding;
    }
    /** Check if global pooling is used */
    bool is_global_pooling() const
    {
        return _is_global_pooling;
    }

private:
    PoolingType   _pool_type;
    Size2D        _pool_size;
    PadStrideInfo _pad_stride_info;
    bool          _exclude_padding;
    bool          _is_global_pooling;
};
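
// Illustrative usage sketch (hypothetical values):
//
//   PoolingLayerInfo max3x3(PoolingType::MAX, 3, PadStrideInfo(2, 2, 1, 1)); // 3x3 max pool, stride 2, pad 1
//   PoolingLayerInfo global_avg(PoolingType::AVG);                           // global average pooling
//   bool is_global = global_avg.is_global_pooling();                         // true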

/** ROI Pooling Layer Information class */
class ROIPoolingLayerInfo final
{
public:
    /** Constructor
     *
     * @param[in] pooled_width   Pooled width of the layer.
     * @param[in] pooled_height  Pooled height of the layer.
     * @param[in] spatial_scale  Spatial scale to be applied to the ROI coordinates and dimensions.
     * @param[in] sampling_ratio (Optional) Number of samples to include in each pooling region. If set to zero, ceil(roi_dims / pooling_dims) is used.
     */
    ROIPoolingLayerInfo(unsigned int pooled_width, unsigned int pooled_height, float spatial_scale, unsigned int sampling_ratio = 0)
        : _pooled_width(pooled_width), _pooled_height(pooled_height), _spatial_scale(spatial_scale), _sampling_ratio(sampling_ratio)
    {
    }
    /** Get the pooled width of the layer */
    unsigned int pooled_width() const
    {
        return _pooled_width;
    }
    /** Get the pooled height of the layer */
    unsigned int pooled_height() const
    {
        return _pooled_height;
    }
    /** Get the spatial scale */
    float spatial_scale() const
    {
        return _spatial_scale;
    }
    /** Get sampling ratio */
    unsigned int sampling_ratio() const
    {
        return _sampling_ratio;
    }

private:
    unsigned int _pooled_width;
    unsigned int _pooled_height;
    float        _spatial_scale;
    unsigned int _sampling_ratio;
};

/** Bounding Box Transform information class */
class BoundingBoxTransformInfo
{
public:
    /** Constructor
     *
     * @param[in] img_width       Width of the original image
     * @param[in] img_height      Height of the original image
     * @param[in] scale           Scale of the original image
     * @param[in] apply_scale     (Optional) Re-apply scaling after transforming the boxes. Defaults to false
     * @param[in] weights         (Optional) Weights [wx, wy, ww, wh] for the deltas. Defaults to all ones
     * @param[in] bbox_xform_clip (Optional) Minimum bounding box width and height after bounding box transformation in log-space. Defaults to log(1000/16)
     */
    BoundingBoxTransformInfo(float img_width, float img_height, float scale, bool apply_scale = false, const std::array<float, 4> weights = { 1.0, 1.0, 1.0, 1.0 },
                             float bbox_xform_clip = 4.135166556742356)
        : _img_width(img_width), _img_height(img_height), _scale(scale), _apply_scale(apply_scale), _weights(weights), _bbox_xform_clip(bbox_xform_clip)
    {
    }

    std::array<float, 4> weights() const
    {
        return _weights;
    }

    float bbox_xform_clip() const
    {
        return _bbox_xform_clip;
    }

    float img_height() const
    {
        return _img_height;
    }

    float img_width() const
    {
        return _img_width;
    }

    float scale() const
    {
        return _scale;
    }

    bool apply_scale() const
    {
        return _apply_scale;
    }

private:
    float                _img_width;
    float                _img_height;
    float                _scale;
    bool                 _apply_scale;
    std::array<float, 4> _weights;
    float                _bbox_xform_clip;
};

/** Activation Layer Information class */
class ActivationLayerInfo
{
public:
    /** Available activation functions */
    enum class ActivationFunction
    {
        LOGISTIC,        /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
        TANH,            /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
        RELU,            /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
        BOUNDED_RELU,    /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
        LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
        LEAKY_RELU,      /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} a \cdot x & \text{if } x < 0 \\ x & \text{if } x \geq 0 \end{cases} \f$ ) */
        SOFT_RELU,       /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
        ABS,             /**< Absolute ( \f$ f(x)= |x| \f$ ) */
        SQUARE,          /**< Square ( \f$ f(x)= x^2 \f$ ) */
        SQRT,            /**< Square root ( \f$ f(x) = \sqrt{x} \f$ ) */
        LINEAR           /**< Linear ( \f$ f(x)= ax + b \f$ ) */
    };

    ActivationLayerInfo() = default;
    /** Constructor
     *
     * @param[in] f The activation function to use.
     * @param[in] a (Optional) The alpha parameter used by some activation functions
     *              (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
     * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
     */
    ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
        : _act(f), _a(a), _b(b), _enabled(true)
    {
    }
    /** Get the type of activation function */
    ActivationFunction activation() const
    {
        return _act;
    }
    /** Get the alpha value */
    float a() const
    {
        return _a;
    }
    /** Get the beta value */
    float b() const
    {
        return _b;
    }
    /** Check if initialised */
    bool enabled() const
    {
        return _enabled;
    }

private:
    ActivationFunction _act     = { ActivationLayerInfo::ActivationFunction::LOGISTIC };
    float              _a       = {};
    float              _b       = {};
    bool               _enabled = { false };
};
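
// Illustrative usage sketch: a ReLU6-style activation expressed as a bounded
// rectifier with alpha = 6 (hypothetical values).
//
//   ActivationLayerInfo relu6(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f);
//   bool used = relu6.enabled(); // true once constructed with a function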

/** Normalization Layer Information class */
class NormalizationLayerInfo
{
public:
    /** Constructor
     *
     * @param[in] type      The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NormType::CROSS_MAP
     * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5.
     * @param[in] alpha     (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001.
     * @param[in] beta      (Optional) Beta parameter used by normalization equation. Defaults to 0.5.
     * @param[in] kappa     (Optional) Kappa parameter used by [Krizhevsky 2012] Across Channel Local Brightness Normalization equation.
     * @param[in] is_scaled (Optional) Boolean that specifies if alpha will be scaled by the normalization size or not.
     *                      Should be false to follow [Krizhevsky 2012].
     */
    NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true)
        : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled)
    {
    }
    /** Get the normalization type */
    NormType type() const
    {
        return _type;
    }
    /** Get the normalization size */
    uint32_t norm_size() const
    {
        return _norm_size;
    }
    /** Get the alpha value */
    float alpha() const
    {
        return _alpha;
    }
    /** Get the beta value */
    float beta() const
    {
        return _beta;
    }
    /** Get the kappa value */
    float kappa() const
    {
        return _kappa;
    }
    /** Check if normalization is cross map */
    bool is_cross_map() const
    {
        return _type == NormType::CROSS_MAP;
    }
    /** Check if normalization is not cross map */
    bool is_in_map() const
    {
        return !is_cross_map();
    }
    /** Return the scaling factor of the normalization function.
     *
     * If is_scaled is set to false then [Krizhevsky 2012] normalization scaling is performed,
     * where alpha is returned plainly, else alpha is scaled by the total number of elements used for the normalization.
     *
     * @return The normalization scaling factor.
     */
    float scale_coeff() const
    {
        const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size;
        return (_is_scaled) ? (_alpha / size) : _alpha;
    }

private:
    NormType _type;
    uint32_t _norm_size;
    float    _alpha;
    float    _beta;
    float    _kappa;
    bool     _is_scaled;
};
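
// Illustrative usage sketch (hypothetical values): cross-map LRN with a window
// of 5 elements.
//
//   NormalizationLayerInfo lrn(NormType::CROSS_MAP, 5, 0.0001f, 0.75f);
//   float coeff = lrn.scale_coeff(); // is_scaled defaults to true, so 0.0001f / 5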

/** Convolution Layer Weights Information class. This class stores the necessary information to compute the convolution layer when the weights are already reshaped */
class WeightsInfo
{
public:
    /** Default constructor */
    WeightsInfo()
        : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
    {
    }
    /** Constructor
     *
     * @param[in] are_reshaped            True if the weights have been reshaped
     * @param[in] kernel_width            Kernel width.
     * @param[in] kernel_height           Kernel height.
     * @param[in] num_kernels             Number of convolution kernels.
     * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
     */
    WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
        : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
    {
    }
    /** Flag which specifies if the weights tensor has been reshaped.
     *
     * @return True if the weights tensor has been reshaped
     */
    bool are_reshaped() const
    {
        return _are_reshaped;
    };
    /** Return the number of convolution kernels
     *
     * @return The number of convolution kernels
     */
    unsigned int num_kernels() const
    {
        return _num_kernels;
    };
    /** Return the width and height of the kernel
     *
     * @return The width and height of the kernel
     */
    std::pair<unsigned int, unsigned int> kernel_size() const
    {
        return std::make_pair(_kernel_width, _kernel_height);
    }
    bool retain_internal_weights() const
    {
        return _retain_internal_weights;
    }

private:
    const bool         _are_reshaped;
    const unsigned int _kernel_width;
    const unsigned int _kernel_height;
    const unsigned int _num_kernels;
    const bool         _retain_internal_weights;
};

/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
 *
 * The matrix A can only be reshaped through @ref CLGEMMInterleave4x4Kernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
 * Note: mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block, can optionally be set just for @ref CLGEMMInterleave4x4Kernel
 *
 * The matrix B can only be reshaped through @ref CLGEMMTranspose1xWKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
 * Note: mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block, can optionally be set just for @ref CLGEMMTranspose1xWKernel
 *
 */
class GEMMReshapeInfo final
{
public:
    /** Default constructor */
    GEMMReshapeInfo()
        : _m(1), _n(1), _k(1), _mult_transpose1xW_width(1), _mult_interleave4x4_height(1), _depth_output_gemm3d(1), _reinterpret_input_as_3d(false)
    {
    }
    /** Constructor
     *
     * @param[in] m                         Number of matrix A rows
     * @param[in] n                         Number of matrix B columns
     * @param[in] k                         Number of matrix A columns or matrix B rows
     * @param[in] mult_transpose1xW_width   (Optional) Multiplication factor for the width of the 1xW transposed block
     * @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleaved block
     * @param[in] depth_output_gemm3d       (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
     * @param[in] reinterpret_input_as_3d   (Optional) Reinterpret the input as a 3D tensor (i.e. this flag should be set to true when GEMM is used
     *                                      to perform 1x1 convolutions with the NHWC data layout)
     */
    GEMMReshapeInfo(int m, int n, int k, int mult_transpose1xW_width = 1, int mult_interleave4x4_height = 1, int depth_output_gemm3d = 1, bool reinterpret_input_as_3d = false)
        : _m(m), _n(n), _k(k), _mult_transpose1xW_width(mult_transpose1xW_width), _mult_interleave4x4_height(mult_interleave4x4_height), _depth_output_gemm3d(depth_output_gemm3d),
          _reinterpret_input_as_3d(reinterpret_input_as_3d)
    {
    }
    /** Number of matrix A rows
     *
     * @return the number of matrix A rows
     */
    int m() const
    {
        return _m;
    }
    /** Number of matrix B columns
     *
     * @return the number of matrix B columns
     */
    int n() const
    {
        return _n;
    }
    /** Number of matrix A columns or matrix B rows
     *
     * @return the number of matrix A columns or matrix B rows
     */
    int k() const
    {
        return _k;
    }
    /** Multiplication factor for the width of the 1xW transposed block
     *
     * @return the multiplication factor for the width of the 1xW transposed block
     */
    int mult_transpose1xW_width() const
    {
        return _mult_transpose1xW_width;
    }
    /** Multiplication factor for the height of the 4x4 interleaved block
     *
     * @return the multiplication factor for the height of the 4x4 interleaved block
     */
    int mult_interleave4x4_height() const
    {
        return _mult_interleave4x4_height;
    }
    /** Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
     *
     * @note The GEMM3D kernel is used when the output has to be reinterpreted as a 3D tensor. In that case:
     *       m = depth_output_gemm3d * output_height
     *
     * @return the depth of the output tensor to be used with the GEMM3D kernel
     */
    int depth_output_gemm3d() const
    {
        return _depth_output_gemm3d;
    }
    /** Flag which specifies if the input tensor has to be reinterpreted as 3D
     *
     * @return True if the input tensor has to be reinterpreted as a 3D tensor
     */
    bool reinterpret_input_as_3d() const
    {
        return _reinterpret_input_as_3d;
    };

private:
    const int  _m;
    const int  _n;
    const int  _k;
    const int  _mult_transpose1xW_width;
    const int  _mult_interleave4x4_height;
    const int  _depth_output_gemm3d;
    const bool _reinterpret_input_as_3d;
};

/** GEMMLowp output stage type */
enum class GEMMLowpOutputStageType
{
    NONE,                     /**< No quantization to uint8 */
    QUANTIZE_DOWN,            /**< Quantize to uint8 using an integer multiplication */
    QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
    QUANTIZE_DOWN_FLOAT       /**< Quantize to uint8 using a floating point multiplication */
};

/** GEMMLowp output stage info */
struct GEMMLowpOutputStageInfo
{
    GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
    int                     gemmlowp_offset{ 0 };                  /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
    int                     gemmlowp_multiplier{ 0 };              /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
    int                     gemmlowp_shift{ 0 };                   /**< GEMMLowp output stage shift used for quantizing to uint8 */
    int                     gemmlowp_min_bound{ 0 };               /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
    int                     gemmlowp_max_bound{ 0 };               /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
};

/** GEMM information class. This class stores the necessary information to compute GEMM functions
 *
 * This object also contains the information about how matrix A and matrix B have been reshaped
 *
 */
class GEMMInfo
{
public:
    /** Default constructor */
    GEMMInfo()
        : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(false), _depth_output_gemm3d(1), _reinterpret_input_as_3d(false), _retain_internal_weights(false), _gemmlowp_output_stage()
    {
    }
    /** Constructor
     *
     * @param[in] is_a_reshaped               True if the matrix A has been reshaped
     * @param[in] is_b_reshaped               True if the matrix B has been reshaped
     * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run
     * @param[in] depth_output_gemm3d         (Optional) Depth (third dimension) of the output tensor to be used with the GEMM3D kernel
     * @param[in] reinterpret_input_as_3d     (Optional) Reinterpret the input as a 3D tensor (i.e. this flag should be set to true when GEMM is used
     *                                        to perform 1x1 convolutions with the NHWC data layout)
     * @param[in] retain_internal_weights     (Optional) Retain the weights tensor from previous run
     * @param[in] gemmlowp_output_stage       (Optional) GEMMLowp Output stage info
     *
     */
    GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 1, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
             GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo())
        : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run), _depth_output_gemm3d(depth_output_gemm3d),
          _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights), _gemmlowp_output_stage(gemmlowp_output_stage)
    {
    }
    /** Flag which specifies if the matrix A has been reshaped
     *
     * @return True if the matrix A has been reshaped
     */
    bool is_a_reshaped() const
    {
        return _is_a_reshaped;
    };
    /** Flag which specifies if the matrix B has been reshaped
     *
     * @return True if the matrix B has been reshaped
     */
    bool is_b_reshaped() const
    {
        return _is_b_reshaped;
    };
    /** Flag which specifies if the reshape of matrix B should be executed only for the first run
     *
     * @note This flag could be set to TRUE when GEMM is used to accelerate a convolution layer
     *
     * @return True if the reshape of matrix B happens only for the first run
     */
    bool reshape_b_only_on_first_run() const
    {
        return _reshape_b_only_on_first_run;
    };
    /** Depth of the output when the GEMM output is reinterpreted as a 3D tensor
     *
     * @return the depth of the output tensor
     */
    int depth_output_gemm3d() const
    {
        return _depth_output_gemm3d;
    };
    /** Flag which specifies if the input tensor has to be reinterpreted as 3D
     *
     * @return True if the input tensor has to be reinterpreted as a 3D tensor
     */
    bool reinterpret_input_as_3d() const
    {
        return _reinterpret_input_as_3d;
    };
    /** Flag which specifies if the weights tensor has to be retained from the previous run
     *
     * @return True if the weights tensor has to be retained
     */
    bool retain_internal_weights() const
    {
        return _retain_internal_weights;
    };
    /** GEMMLowp output stage
     *
     * @return the GEMMLowp output stage info
     */
    GEMMLowpOutputStageInfo gemmlowp_output_stage() const
    {
        return _gemmlowp_output_stage;
    };

private:
    const bool                    _is_a_reshaped;
    const bool                    _is_b_reshaped;
    const bool                    _reshape_b_only_on_first_run;
    const int                     _depth_output_gemm3d;
    const bool                    _reinterpret_input_as_3d;
    const bool                    _retain_internal_weights;
    const GEMMLowpOutputStageInfo _gemmlowp_output_stage;
};
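
// Illustrative usage sketch (hypothetical values): reuse the reshaped matrix B
// across runs, as is typically done when GEMM backs a convolution layer.
//
//   GEMMInfo gemm_info(false /* is_a_reshaped */, false /* is_b_reshaped */,
//                      true /* reshape_b_only_on_first_run */);
//   bool reuse_b = gemm_info.reshape_b_only_on_first_run(); // true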

/** Winograd information */
struct WinogradInfo
{
    /** Constructor
     *
     * @param[in] output_tile_sz Width and height of the output tile
     * @param[in] kernel_sz      Width and height of the kernel
     * @param[in] input_dims     Width and height of the input tensor before the convolution is applied
     * @param[in] conv_info      Convolution info (Pads, strides)
     * @param[in] data_layout    Data layout to use for the output tensor once the convolution has been applied
     */
    WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
        : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
    {
    }

    Size2D        output_tile_size{};                     /**< Width and height of the output tile */
    Size2D        kernel_size{};                          /**< Width and height of the kernel */
    Size2D        input_dimensions{};                     /**< Width and height of the input tensor before the convolution is applied */
    PadStrideInfo convolution_info{};                     /**< Convolution info (Pads, strides,...) */
    DataLayout    output_data_layout{ DataLayout::NCHW }; /**< Data layout to use for the output tensor once the convolution has been applied (NCHW or NHWC) */
};
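
// Illustrative usage sketch only; whether a given tile/kernel combination is
// supported depends on the backend. Hypothetical values: an F(2x2, 3x3)
// configuration on a 224x224 input with unit stride and 1-pixel padding.
//
//   WinogradInfo winograd_info(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(224U, 224U),
//                              PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW);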

/** IO formatting information class */
struct IOFormatInfo
{
    /** Precision type used when printing floating point numbers */
    enum class PrecisionType
    {
        Default, /**< Default precision to the one that the current stream has */
        Custom,  /**< Custom precision specified by the user using the precision parameter */
        Full     /**< The maximum precision of the floating point representation */
    };

    /** Specifies the area to be printed, used by Tensor objects */
    enum class PrintRegion
    {
        ValidRegion, /**< Prints the valid region of the Tensor object */
        NoPadding,   /**< Prints the Tensor object without the padding */
        Full         /**< Print the tensor object including padding */
    };

    /** Construct a set of IO formatting information.
     *
     * @param[in] print_region   Area to be printed. Used by Tensor objects. Default: ValidRegion.
     * @param[in] precision_type Precision type for floating point numbers. Default: stream default.
     * @param[in] precision      Precision value for floating point numbers. Default: 10.
     * @param[in] align_columns  Whether to align columns when printed. Default: true.
     * @param[in] element_delim  Delimiter between elements. Default: " ".
     * @param[in] row_delim      Delimiter between rows. Default: "\n".
     */
    IOFormatInfo(PrintRegion   print_region   = PrintRegion::ValidRegion,
                 PrecisionType precision_type = PrecisionType::Default,
                 unsigned int  precision      = 10,
                 bool          align_columns  = true,
                 std::string   element_delim  = " ",
                 std::string   row_delim      = "\n")
        : print_region(print_region),
          precision_type(precision_type),
          precision(precision),
          element_delim(element_delim),
          row_delim(row_delim),
          align_columns(align_columns)
    {
    }

    /** Area to be printed by Tensor objects */
    PrintRegion print_region;
    /** Floating point precision type */
    PrecisionType precision_type;
    /** Floating point precision */
    unsigned int precision;
    /** Element delimiter */
    std::string element_delim;
    /** Row delimiter */
    std::string row_delim;
    /** Align columns */
    bool align_columns;
};
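
// Illustrative usage sketch (hypothetical values): print the full tensor,
// including padding, with 4 digits of precision and comma-separated elements.
//
//   IOFormatInfo format(IOFormatInfo::PrintRegion::Full, IOFormatInfo::PrecisionType::Custom,
//                       4, true, ", ", "\n");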

/** Available ConvolutionMethod */
enum class ConvolutionMethod
{
    GEMM,    /**< Convolution using GEMM */
    DIRECT,  /**< Direct convolution */
    WINOGRAD /**< Convolution using Winograd */
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TYPES_H__ */