blob: d8818ce209d7ca6e96854bbb5921e5e694e39a88 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>

#include <arm_compute/core/Types.h>

#include <set>
12
13namespace armnn
14{
15
16inline arm_compute::NormalizationLayerInfo
Matteo Martincigh539b44d2018-10-01 09:26:39 +010017CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
18 armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +000019{
Matteo Martincigh539b44d2018-10-01 09:26:39 +010020 unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
21 const unsigned int depth = tensorInfo.GetShape()[depthDimension];
telsoa014fcda012018-03-09 14:13:49 +000022
23 // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
24 // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
25 // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully chosing the normalization
26 // parameters.
27 //
28 // Please refer to both the reference implementation of the normalization layer and the implementation of
29 // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.
30
31 // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
32 // CL: This does not result in extra kernel threads not doing any work: See usage of the RADIUS parameter in
33 // ACL's normalization_layer_cross_map() CL function.
34 const uint32_t normSize = depth * 2u + 1u;
35
36 // See ACL's NormalizationLayerInfo::scale_coeff() definition.
37 // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
38 const float alpha = 1.0f;
39
telsoa01c577f2c2018-08-31 09:22:23 +010040 // Don't offset the reduction.
telsoa014fcda012018-03-09 14:13:49 +000041 const float kappa = 0.0f;
42
43 // pow(reduction, -0.5) = 1 / sqrt(reduction)
44 const float beta = 0.5f;
45
46 return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
47}
48
49inline arm_compute::ActivationLayerInfo::ActivationFunction
50ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
51{
52 using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;
53
54 switch (armnnFunction)
55 {
56 case ActivationFunction::Linear: return AclActivationFunction::LINEAR;
telsoa01c577f2c2018-08-31 09:22:23 +010057 // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
telsoa014fcda012018-03-09 14:13:49 +000058 case ActivationFunction::Sigmoid: return AclActivationFunction::LOGISTIC;
59 case ActivationFunction::ReLu: return AclActivationFunction::RELU;
60 case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
61 case ActivationFunction::SoftReLu: return AclActivationFunction::SOFT_RELU;
62 case ActivationFunction::LeakyReLu: return AclActivationFunction::LEAKY_RELU;
63 case ActivationFunction::Abs: return AclActivationFunction::ABS;
64 case ActivationFunction::Sqrt: return AclActivationFunction::SQRT;
65 case ActivationFunction::Square: return AclActivationFunction::SQUARE;
66 case ActivationFunction::TanH: return AclActivationFunction::TANH;
David Monahan3b3c3812020-02-25 09:03:29 +000067 case ActivationFunction::Elu: return AclActivationFunction::ELU;
Jan Eilersa83af7b2020-03-18 15:58:11 +000068 case ActivationFunction::HardSwish: return AclActivationFunction::HARD_SWISH;
telsoa014fcda012018-03-09 14:13:49 +000069 default: throw InvalidArgumentException("Unsupported activation function");
70 }
71}
72
73inline arm_compute::ActivationLayerInfo
74ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
75{
76 return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
77 actDesc.m_A, actDesc.m_B);
78}
79
Teresa Charlin2b030d92020-03-27 16:40:56 +000080inline arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor& descriptor)
81{
82 switch (descriptor.m_Operation)
83 {
84 case ComparisonOperation::Greater: return arm_compute::ComparisonOperation::Greater;
85 case ComparisonOperation::GreaterOrEqual: return arm_compute::ComparisonOperation::GreaterEqual;
86 case ComparisonOperation::Less: return arm_compute::ComparisonOperation::Less;
87 case ComparisonOperation::LessOrEqual: return arm_compute::ComparisonOperation::LessEqual;
88 case ComparisonOperation::Equal: return arm_compute::ComparisonOperation::Equal;
89 case ComparisonOperation::NotEqual: return arm_compute::ComparisonOperation::NotEqual;
90 default: throw InvalidArgumentException("Unsupported comparison function");
91 }
92}
93
telsoa014fcda012018-03-09 14:13:49 +000094inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
95{
96 using arm_compute::PoolingType;
97
98 switch (poolingAlgorithm)
99 {
100 case PoolingAlgorithm::Max: return PoolingType::MAX;
101 case PoolingAlgorithm::Average: return PoolingType::AVG;
102 case PoolingAlgorithm::L2: return PoolingType::L2;
103 default: throw InvalidArgumentException("Unsupported pooling algorithm");
104 }
105}
106
107inline arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding
108 rounding)
109{
110 using arm_compute::DimensionRoundingType;
111
112 switch (rounding)
113 {
114 case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
115 case OutputShapeRounding::Floor: return DimensionRoundingType::FLOOR;
116 default: throw InvalidArgumentException("Unsupported Output Shape Rounding type");
117 }
118}
119
120inline arm_compute::NormType
121ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
122{
123 using arm_compute::NormType;
124 switch (channelType)
125 {
126 case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
127 case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
128 default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
129 }
130}
131
telsoa01c577f2c2018-08-31 09:22:23 +0100132inline arm_compute::FullyConnectedLayerInfo
133ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc)
134{
135 arm_compute::FullyConnectedLayerInfo fc_info;
136 fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
137 return fc_info;
138}
139
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +0100140inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
141{
142 switch (resizeMethod)
143 {
144 case ResizeMethod::Bilinear:
145 return arm_compute::InterpolationPolicy::BILINEAR;
146 case ResizeMethod::NearestNeighbor:
147 return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
148 default:
149 throw InvalidArgumentException("Unsupported resize method");
150 }
151}
152
Teresa Charlinc1f6b092020-05-11 16:10:38 +0100153template<typename T>
154inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000155{
Colm Donelanc3c5fc22019-08-15 16:03:17 +0100156 // Detect the Android default value of -1 and return the ACL default value of 1.
157 if (softmaxDesc.m_Axis == -1)
158 {
159 return 1;
160 }
161
162 unsigned int dim = tensor.GetNumDimensions();
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000163
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100164 ARMNN_ASSERT(dim != 0);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000165
166 // Currently ArmNN support axis 1.
Teresa Charlinc1f6b092020-05-11 16:10:38 +0100167 return static_cast<T>(dim) - 1;
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000168}
169
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +0100170inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
171{
172 unsigned int numSplit = desc.GetNumViews();
173 unsigned int numDimensions = desc.GetNumDimensions();
174 std::set<unsigned int> splitAxis;
175
176 for (unsigned int i = 0; i < numSplit; ++i)
177 {
178 for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
179 {
180 if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
181 {
182 splitAxis.insert(dimIdx);
183 }
184 }
185 }
186 return splitAxis;
187}
188
Aron Virginas-Tar5c3e9232018-11-16 11:00:48 +0000189} // namespace armnn