//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#pragma once
| 6 | |
#include <algorithm>
#include <array>
#include <initializer_list>
| 8 | |
namespace armnn
{
| 11 | |
/// Upper bound on tensor rank supported throughout ArmNN (also sizes
/// PermutationVector's fixed backing array below).
constexpr unsigned int MaxNumOfTensorDimensions = 4U;
| 13 | |
/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};
| 22 | |
/// Element data types a tensor can hold.
enum class DataType
{
    Float32         = 0, ///< 32-bit floating point
    QuantisedAsymm8 = 1, ///< 8-bit asymmetrically quantised (quantisation parameters live elsewhere — not visible in this header)
    Signed32        = 2  ///< 32-bit signed integer
};
| 29 | |
/// Activation functions selectable for an activation layer.
enum class ActivationFunction
{
    Sigmoid     = 0,
    TanH        = 1,
    Linear      = 2,
    ReLu        = 3,
    BoundedReLu = 4, ///< min(a, max(b, input))
    SoftReLu    = 5,
    LeakyReLu   = 6,
    Abs         = 7,
    Sqrt        = 8,
    Square      = 9
};
| 43 | |
/// Pooling operations selectable for a pooling layer.
enum class PoolingAlgorithm
{
    Max     = 0,
    Average = 1,
    L2      = 2
};
| 50 | |
///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the padded values are ignored (they are
/// not even zeros, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not.
///
enum class PaddingMethod
{
    IgnoreValue = 0, ///< Padding fields count towards the divisor, but their values are ignored
    Exclude     = 1  ///< Padding fields neither count towards the divisor nor contribute values
};
| 65 | |
/// Whether normalization operates across channels or within a single channel.
enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};
| 71 | |
/// Normalization methods selectable for a normalization layer.
enum class NormalizationAlgorithmMethod
{
    LocalBrightness = 0, ///< Krichevsky 2012: Local Brightness Normalization
    LocalContrast   = 1  ///< Jarret 2009: Local Contrast Normalization
};
| 77 | |
/// Rounding mode applied when a computed output dimension is not an integer
/// (name suggests use by pooling output-shape inference — confirm at usage sites).
enum class OutputShapeRounding
{
    Floor   = 0,
    Ceiling = 1
};
| 83 | |
/// Compute devices work can be assigned to.
enum class Compute
{
    CpuRef    = 0, ///< CPU Execution: Reference C++ kernels
    CpuAcc    = 1, ///< CPU Execution: NEON: ArmCompute
    GpuAcc    = 2, ///< GPU Execution: OpenCL: ArmCompute
    // NOTE(review): values 3-4 are deliberately skipped here — presumably
    // reserved; confirm before assigning new devices to them.
    Undefined = 5
};
| 91 | |
/// Device configuration passed around by the runtime.
struct DeviceSpec
{
    Compute DefaultComputeDevice; ///< Device to use when none is explicitly requested (inferred from name — verify against runtime code)
};
| 96 | |
/// Type of identifiers for bindable layers (inputs, outputs).
/// Plain int: values are caller-chosen handles, not indices into any container here.
using LayerBindingId = int;
| 99 | |
| 100 | class PermutationVector |
| 101 | { |
| 102 | public: |
| 103 | using ValueType = unsigned int; |
| 104 | using SizeType = unsigned int; |
| 105 | using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>; |
| 106 | using ConstIterator = typename ArrayType::const_iterator; |
| 107 | |
| 108 | /// @param dimMappings Indicates how to translate tensor elements from a given source into the target destination, |
| 109 | /// when source and target potentially have different memory layouts. |
| 110 | /// |
| 111 | /// E.g. For a 4-d tensor laid out in memory with format (Batch Element, Height, Width, Channels), |
| 112 | /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding |
| 113 | /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped |
| 114 | /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and |
| 115 | /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array: |
| 116 | /// [ 0, 2, 3, 1 ]. |
| 117 | /// |
| 118 | /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element, |
| 119 | /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have |
| 120 | /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents: |
| 121 | /// [ 0, 3, 1, 2 ]. |
| 122 | /// |
| 123 | PermutationVector(const ValueType *dimMappings, SizeType numDimMappings); |
| 124 | |
| 125 | PermutationVector(std::initializer_list<ValueType> dimMappings); |
| 126 | |
| 127 | ValueType operator[](SizeType i) const { return m_DimMappings.at(i); } |
| 128 | |
| 129 | SizeType GetSize() const { return m_NumDimMappings; } |
| 130 | |
| 131 | ConstIterator begin() const { return m_DimMappings.begin(); } |
| 132 | ConstIterator end() const { return m_DimMappings.end(); } |
| 133 | |
| 134 | bool IsEqual(const PermutationVector& other) const |
| 135 | { |
| 136 | return std::equal(begin(), end(), other.begin(), other.end()); |
| 137 | } |
| 138 | |
| 139 | bool IsInverse(const PermutationVector& other) const |
| 140 | { |
| 141 | bool isInverse = (GetSize() == other.GetSize()); |
| 142 | for (SizeType i = 0; isInverse && (i < GetSize()); ++i) |
| 143 | { |
| 144 | isInverse = (m_DimMappings[other.m_DimMappings[i]] == i); |
| 145 | } |
| 146 | return isInverse; |
| 147 | } |
| 148 | |
| 149 | private: |
| 150 | ArrayType m_DimMappings; |
| 151 | /// Number of valid entries in @ref m_DimMappings |
| 152 | SizeType m_NumDimMappings; |
| 153 | }; |
} // namespace armnn