//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <arm_compute/core/Types.h>

#include <boost/assert.hpp>

#include <set>

namespace armnn
{

inline arm_compute::NormalizationLayerInfo
CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
    const unsigned int depth = tensorInfo.GetShape()[depthDimension];

    // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
    // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
    // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully choosing the normalization
    // parameters.
    //
    // Please refer to both the reference implementation of the normalization layer and the implementation of
    // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.

    // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
    // CL: This does not result in extra kernel threads not doing any work: See usage of the RADIUS parameter in
    // ACL's normalization_layer_cross_map() CL function.
    const uint32_t normSize = depth * 2u + 1u;

    // See ACL's NormalizationLayerInfo::scale_coeff() definition.
    // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
    const float alpha = 1.0f;

    // Don't offset the reduction.
    const float kappa = 0.0f;

    // pow(reduction, -0.5) = 1 / sqrt(reduction)
    const float beta = 0.5f;

    return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
}
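
// Illustrative usage (a minimal sketch; the tensor shape below is hypothetical, chosen only for the example).
// For an NCHW float tensor of shape [1, 8, 16, 16], the depth is 8, so the call yields normSize == 17,
// alpha == 1.0f, beta == 0.5f and kappa == 0.0f:
//
//     const armnn::TensorInfo exampleInfo({ 1, 8, 16, 16 }, armnn::DataType::Float32);
//     arm_compute::NormalizationLayerInfo aclNormInfo =
//         CreateAclNormalizationLayerInfoForL2Normalization(exampleInfo, armnn::DataLayout::NCHW);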

inline arm_compute::ActivationLayerInfo::ActivationFunction
ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
{
    using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

    switch (armnnFunction)
    {
        case ActivationFunction::Linear:        return AclActivationFunction::LINEAR;
        // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
        case ActivationFunction::Sigmoid:       return AclActivationFunction::LOGISTIC;
        case ActivationFunction::ReLu:          return AclActivationFunction::RELU;
        case ActivationFunction::BoundedReLu:   return AclActivationFunction::LU_BOUNDED_RELU;
        case ActivationFunction::SoftReLu:      return AclActivationFunction::SOFT_RELU;
        case ActivationFunction::LeakyReLu:     return AclActivationFunction::LEAKY_RELU;
        case ActivationFunction::Abs:           return AclActivationFunction::ABS;
        case ActivationFunction::Sqrt:          return AclActivationFunction::SQRT;
        case ActivationFunction::Square:        return AclActivationFunction::SQUARE;
        case ActivationFunction::TanH:          return AclActivationFunction::TANH;
        default:                                throw InvalidArgumentException("Unsupported activation function");
    }
}

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
{
    return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
                                            actDesc.m_A, actDesc.m_B);
}
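
// Illustrative usage (a minimal sketch; the descriptor values are hypothetical). A bounded ReLU capped at 6
// maps onto ACL's LU_BOUNDED_RELU, with m_A providing the upper bound and m_B the lower bound:
//
//     armnn::ActivationDescriptor actDesc;
//     actDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
//     actDesc.m_A = 6.0f;   // upper bound
//     actDesc.m_B = 0.0f;   // lower bound
//     arm_compute::ActivationLayerInfo aclActInfo =
//         ConvertActivationDescriptorToAclActivationLayerInfo(actDesc);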

inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
{
    using arm_compute::PoolingType;

    switch (poolingAlgorithm)
    {
        case PoolingAlgorithm::Max:     return PoolingType::MAX;
        case PoolingAlgorithm::Average: return PoolingType::AVG;
        case PoolingAlgorithm::L2:      return PoolingType::L2;
        default:                        throw InvalidArgumentException("Unsupported pooling algorithm");
    }
}

inline arm_compute::DimensionRoundingType
ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding rounding)
{
    using arm_compute::DimensionRoundingType;

    switch (rounding)
    {
        case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
        case OutputShapeRounding::Floor:   return DimensionRoundingType::FLOOR;
        default:                           throw InvalidArgumentException("Unsupported Output Shape Rounding type");
    }
}

inline arm_compute::NormType
ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
{
    using arm_compute::NormType;
    switch (channelType)
    {
        case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
        case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
        default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
    }
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    return fc_info;
}

inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
{
    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
            return arm_compute::InterpolationPolicy::BILINEAR;
        case ResizeMethod::NearestNeighbor:
            return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
        default:
            throw InvalidArgumentException("Unsupported resize method");
    }
}

inline unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
{
    // Detect the Android default value of -1 and return the ACL default value of 1.
    if (softmaxDesc.m_Axis == -1)
    {
        return 1;
    }

    unsigned int dim = tensor.GetNumDimensions();

    BOOST_ASSERT(dim != 0);

    // Currently ArmNN only supports axis 1.
    return dim - 1;
}
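
// Illustrative usage (a minimal sketch; the shape and axis value are hypothetical). With the Android default
// axis of -1 the ACL default of 1 is returned; otherwise the result depends only on the tensor rank:
//
//     armnn::SoftmaxDescriptor softmaxDesc;
//     softmaxDesc.m_Axis = -1;
//     const armnn::TensorInfo logits({ 2, 5 }, armnn::DataType::Float32);
//     unsigned int aclAxis = ComputeSoftmaxAclAxis(softmaxDesc, logits); // == 1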

inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
{
    unsigned int numSplit = desc.GetNumViews();
    unsigned int numDimensions = desc.GetNumDimensions();
    std::set<unsigned int> splitAxis;

    for (unsigned int i = 0; i < numSplit; ++i)
    {
        for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
        {
            if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
            {
                splitAxis.insert(dimIdx);
            }
        }
    }
    return splitAxis;
}
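
// Illustrative usage (a minimal sketch; the shapes are hypothetical). Splitting a [4, 6] tensor into two
// [4, 3] views differs from the input only in dimension 1, so {1} is reported as the set of split axes:
//
//     armnn::SplitterDescriptor splitDesc(2, 2); // 2 views, 2 dimensions each
//     for (unsigned int view = 0; view < 2; ++view)
//     {
//         splitDesc.SetViewSize(view, 0, 4);
//         splitDesc.SetViewSize(view, 1, 3);
//     }
//     std::set<unsigned int> axes = ComputeSplitAxis(splitDesc, armnn::TensorShape({ 4, 6 })); // {1}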

} // namespace armnn