//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <arm_compute/core/Types.h>

namespace armnn
{

inline arm_compute::NormalizationLayerInfo
CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
    const unsigned int depth = tensorInfo.GetShape()[depthDimension];

    // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
    // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
    // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully choosing the normalization
    // parameters.
    //
    // Please refer to both the reference implementation of the normalization layer and the implementation of
    // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.

    // Make sure the normalization covers the entire depth range. ACL requires the normalization size to be odd.
    // CL: This does not leave extra kernel threads idle: see the usage of the RADIUS parameter in ACL's
    // normalization_layer_cross_map() CL function.
    const uint32_t normSize = depth * 2u + 1u;

    // See ACL's NormalizationLayerInfo::scale_coeff() definition.
    // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
    const float alpha = 1.0f;

    // Don't offset the reduction.
    const float kappa = 0.0f;

    // pow(reduction, -0.5) = 1 / sqrt(reduction)
    const float beta = 0.5f;

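    // Sketch of the resulting computation (per the ACL references above): with the final 'is_scaled' argument set
    // to false, NormalizationLayerInfo::scale_coeff() returns alpha unmodified, so the cross-map normalization
    // evaluates to
    //     output = input / pow(kappa + alpha * sum(input^2 over normSize), beta)
    //            = input / sqrt(sum of input^2 over the whole depth axis)
    // i.e. the desired depthwise L2 normalization.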
    return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
}
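
// A minimal usage sketch (the tensor and layer names below are illustrative, not part of this header):
//     arm_compute::CLNormalizationLayer l2NormLayer;
//     l2NormLayer.configure(&aclInput, &aclOutput,
//                           CreateAclNormalizationLayerInfoForL2Normalization(inputTensorInfo, DataLayout::NCHW));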

inline arm_compute::ActivationLayerInfo::ActivationFunction
ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
{
    using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

    switch (armnnFunction)
    {
        case ActivationFunction::Linear:        return AclActivationFunction::LINEAR;
        // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
        case ActivationFunction::Sigmoid:       return AclActivationFunction::LOGISTIC;
        case ActivationFunction::ReLu:          return AclActivationFunction::RELU;
        case ActivationFunction::BoundedReLu:   return AclActivationFunction::LU_BOUNDED_RELU;
        case ActivationFunction::SoftReLu:      return AclActivationFunction::SOFT_RELU;
        case ActivationFunction::LeakyReLu:     return AclActivationFunction::LEAKY_RELU;
        case ActivationFunction::Abs:           return AclActivationFunction::ABS;
        case ActivationFunction::Sqrt:          return AclActivationFunction::SQRT;
        case ActivationFunction::Square:        return AclActivationFunction::SQUARE;
        case ActivationFunction::TanH:          return AclActivationFunction::TANH;
        default:                                throw InvalidArgumentException("Unsupported activation function");
    }
}

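// Note: actDesc.m_A and actDesc.m_B are forwarded below as ACL's 'a' and 'b' activation parameters
// (for BoundedReLu, for example, 'a' is the upper and 'b' the lower bound of the clamp).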
inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
{
    return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
                                            actDesc.m_A, actDesc.m_B);
}

inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
{
    using arm_compute::PoolingType;

    switch (poolingAlgorithm)
    {
        case PoolingAlgorithm::Max:     return PoolingType::MAX;
        case PoolingAlgorithm::Average: return PoolingType::AVG;
        case PoolingAlgorithm::L2:      return PoolingType::L2;
        default:                        throw InvalidArgumentException("Unsupported pooling algorithm");
    }
}

inline arm_compute::DimensionRoundingType
ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding rounding)
{
    using arm_compute::DimensionRoundingType;

    switch (rounding)
    {
        case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
        case OutputShapeRounding::Floor:   return DimensionRoundingType::FLOOR;
        default:                           throw InvalidArgumentException("Unsupported output shape rounding type");
    }
}

inline arm_compute::NormType
ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
{
    using arm_compute::NormType;
    switch (channelType)
    {
        case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
        case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
        default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
    }
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    return fc_info;
}

} // namespace armnn