//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <memory>
#include <stdint.h>
#include "BackendId.hpp"
#include "Exceptions.hpp"
#include "Deprecated.hpp"

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

// The lowest performance data capture interval we support is 10 milliseconds,
// expressed below in microseconds.
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;

/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};

enum class DataType
{
    Float16 = 0,
    Float32 = 1,
    QAsymmU8 = 2,
    Signed32 = 3,
    Boolean = 4,
    QSymmS16 = 5,
    QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
    QSymmS8 = 7,
    QAsymmS8 = 8,

    QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
    QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};

enum class ActivationFunction
{
    Sigmoid = 0,
    TanH = 1,
    Linear = 2,
    ReLu = 3,
    BoundedReLu = 4, ///< min(a, max(b, input))  ReLu1 & ReLu6.
    SoftReLu = 5,
    LeakyReLu = 6,
    Abs = 7,
    Sqrt = 8,
    Square = 9,
    Elu = 10,
    HardSwish = 11
};
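
// For reference: BoundedReLu computes min(a, max(b, input)), so a = 1, b = -1
// yields ReLu1 and a = 6, b = 0 yields ReLu6, the two variants named above.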

enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};

enum class ComparisonOperation
{
    Equal = 0,
    Greater = 1,
    GreaterOrEqual = 2,
    Less = 3,
    LessOrEqual = 4,
    NotEqual = 5
};

enum class UnaryOperation
{
    Abs = 0,
    Exp = 1,
    Sqrt = 2,
    Rsqrt = 3,
    Neg = 4
};

enum class PoolingAlgorithm
{
    Max = 0,
    Average = 1,
    L2 = 2
};

enum class ResizeMethod
{
    Bilinear = 0,
    NearestNeighbor = 1
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the padding values themselves are ignored (they
/// are not even zeroes, which would make a difference for max pooling of a
/// tensor with negative values). The difference between IgnoreValue and
/// Exclude is that the former counts the padding fields in the divisor of
/// Average and L2 pooling, while Exclude does not. A worked example follows
/// the enum below.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude = 1
};
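
// Worked example: average-pool a 2x2 window in which one field lies in the
// padding region and the three real inputs are 3, 6 and 9 (sum 18):
//   IgnoreValue: the padding field still counts in the divisor -> 18 / 4 = 4.5
//   Exclude:     only the real fields count in the divisor     -> 18 / 3 = 6.0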

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor = 0,
    Ceiling = 1
};

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;
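// The function-pointer deleter on IBackendUniquePtr lets the module that
// created the backend supply its own destruction routine, presumably so a
// backend from a dynamically loaded library is destroyed by matching code
// (a design observation, not documented behaviour).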

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. For a 4-d tensor laid out in memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ].
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    ConstIterator end() const { return m_DimMappings.end(); }

    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
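
// Usage sketch: the two mappings from the documentation above are inverses
// of each other.
//
//     PermutationVector nhwcToArmNN({ 0, 2, 3, 1 });
//     PermutationVector armnnToNhwc({ 0, 3, 1, 2 });
//     assert(nhwcToArmNN.IsInverse(armnnToNhwc)); // passes
//     assert(!nhwcToArmNN.IsEqual(armnnToNhwc));  // different mappings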

namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
using LayerGuid = profiling::ProfilingGuid;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
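
// A minimal sketch of a matching callback (illustrative only; how the callback
// is registered with the runtime is outside the scope of this header, and the
// body needs <iostream>):
//
//     armnn::DebugCallbackFunction printDebugInfo =
//         [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* /*tensorHandle*/)
//     {
//         std::cout << "Debug layer input from layer " << uint64_t(guid)
//                   << ", output slot " << slotIndex << std::endl;
//     };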


namespace profiling
{

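// MIN_STATIC_GUID marks the start of the statically defined guid range: a
// guid with the top bit of its 64 bits set (i.e. >= 2^63) is static, leaving
// the values below for guids generated at runtime (an inference from the
// constant and the ProfilingDynamicGuid/ProfilingStaticGuid types below).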
static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

class ProfilingGuid
{
public:
    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};

/// Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
struct ProfilingDynamicGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

struct ProfilingStaticGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

} // namespace profiling

} // namespace armnn


namespace std
{
// make ProfilingGuid hashable
template<>
struct hash<armnn::profiling::ProfilingGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

// make ProfilingDynamicGuid hashable
template<>
struct hash<armnn::profiling::ProfilingDynamicGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

// make ProfilingStaticGuid hashable
template<>
struct hash<armnn::profiling::ProfilingStaticGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};
} // namespace std
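
// Usage sketch: with the specialisations above, the guid types work directly
// as keys in unordered associative containers.
//
//     #include <unordered_map>
//
//     std::unordered_map<armnn::profiling::ProfilingGuid, std::string> layerNames;
//     layerNames[armnn::profiling::ProfilingGuid(1u)] = "input";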