//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <memory>
#include <stdint.h>
#include "BackendId.hpp"
#include "Exceptions.hpp"
#include "Deprecated.hpp"

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

// The lowest performance data capture interval we support is 10 milliseconds (10000 microseconds).
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;

/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};

enum class DataType
{
    Float16 = 0,
    Float32 = 1,
    QAsymmU8 = 2,
    Signed32 = 3,
    Boolean = 4,
    QSymmS16 = 5,
    QuantizedSymm8PerAxis = 6,
    QSymmS8 = 7,

    QuantisedAsymm8 ARMNN_DEPRECATED_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
    QuantisedSymm16 ARMNN_DEPRECATED_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};
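// Using a deprecated alias still compiles but is expected to emit a compiler
// deprecation warning (assuming ARMNN_DEPRECATED_MSG expands to a
// [[deprecated]]-style attribute, which is defined in Deprecated.hpp, not here).
// A minimal sketch:
//
//     armnn::DataType oldStyle = armnn::DataType::QuantisedAsymm8; // warns
//     armnn::DataType newStyle = armnn::DataType::QAsymmU8;        // preferred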

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};

enum class ActivationFunction
{
    Sigmoid = 0,
    TanH = 1,
    Linear = 2,
    ReLu = 3,
    BoundedReLu = 4, ///< min(a, max(b, input))
    SoftReLu = 5,
    LeakyReLu = 6,
    Abs = 7,
    Sqrt = 8,
    Square = 9
};
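// Worked example (illustrative): with a = 6 and b = 0, BoundedReLu computes
// min(6, max(0, input)), i.e. the common "ReLU6" clamp; an input of 8 yields
// 6, an input of -3 yields 0, and an input of 4 passes through unchanged.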

enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};

enum class ComparisonOperation
{
    Equal = 0,
    Greater = 1,
    GreaterOrEqual = 2,
    Less = 3,
    LessOrEqual = 4,
    NotEqual = 5
};

enum class PoolingAlgorithm
{
    Max = 0,
    Average = 1,
    L2 = 2
};

enum class ResizeMethod
{
    Bilinear = 0,
    NearestNeighbor = 1
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the values are ignored (they are
/// not even zeroes, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude = 1
};
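// Worked example (illustrative, not from the original source): a 2x2 Average
// pool window that hangs over the tensor edge covers three real values
// {3, 5, 7} and one padding field. With IgnoreValue the padding field still
// counts in the divisor, giving (3 + 5 + 7) / 4 = 3.75; with Exclude it does
// not, giving (3 + 5 + 7) / 3 = 5.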

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor = 0,
    Ceiling = 1
};

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. For a 4-d tensor laid out in a memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ].
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    ConstIterator end() const { return m_DimMappings.end(); }

    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
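// A minimal usage sketch (illustrative, not part of this header): the
// NHWC -> NCHW mapping described in the comment above, and its inverse.
//
//     armnn::PermutationVector nhwcToNchw({ 0, 2, 3, 1 });
//     armnn::PermutationVector nchwToNhwc({ 0, 3, 1, 2 });
//     bool inverse = nhwcToNchw.IsInverse(nchwToNhwc); // true: each maps back to the other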

namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
using LayerGuid = profiling::ProfilingGuid;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
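// A minimal sketch (illustrative) of a callback matching this signature;
// the body only logs the layer guid and slot index (assumes <iostream>):
//
//     armnn::DebugCallbackFunction callback =
//         [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* /*tensorHandle*/)
//     {
//         std::cout << "Debug output: guid=" << uint64_t(guid) << " slot=" << slotIndex << "\n";
//     };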


namespace profiling
{

static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

class ProfilingGuid
{
public:
    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};

/// Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
struct ProfilingDynamicGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

struct ProfilingStaticGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};
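// Illustrative sketch (an assumption based on the MIN_STATIC_GUID constant
// above, not on code in this header): setting the top bit of the 64-bit guid
// space reserves it for statically defined guids, so a guid's origin can be
// told apart with a single comparison.
//
//     armnn::profiling::ProfilingStaticGuid staticGuid(armnn::profiling::MIN_STATIC_GUID | 42u);
//     bool isStatic = (uint64_t(staticGuid) >= armnn::profiling::MIN_STATIC_GUID); // true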

} // namespace profiling

} // namespace armnn


namespace std
{
// make ProfilingGuid hashable
template<>
struct hash<armnn::profiling::ProfilingGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

// make ProfilingDynamicGuid hashable
template<>
struct hash<armnn::profiling::ProfilingDynamicGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

// make ProfilingStaticGuid hashable
template<>
struct hash<armnn::profiling::ProfilingStaticGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};
} // namespace std
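// With the specializations above, guids can be used directly as keys in
// unordered containers; a minimal usage sketch (illustrative, assumes
// <unordered_set>):
//
//     std::unordered_set<armnn::profiling::ProfilingGuid> seen;
//     seen.insert(armnn::profiling::ProfilingGuid(1u));
//     bool found = (seen.count(armnn::profiling::ProfilingGuid(1u)) == 1); // true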