//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include "armnn/Exceptions.hpp"
#include <armnn/Descriptors.hpp>

namespace armnn
{
namespace armcomputetensorutils
{

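// Maps an armnn::DataType to the equivalent arm_compute::DataType.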
arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType)
{
    switch(dataType)
    {
        case armnn::DataType::Boolean:
            return arm_compute::DataType::U8;
        case armnn::DataType::Float16:
            return arm_compute::DataType::F16;
        case armnn::DataType::Float32:
            return arm_compute::DataType::F32;
        case armnn::DataType::QAsymmU8:
            return arm_compute::DataType::QASYMM8;
        case armnn::DataType::QSymmS16:
            return arm_compute::DataType::QSYMM16;
        case armnn::DataType::QSymmS8:
            return arm_compute::DataType::QSYMM8;
        case armnn::DataType::QuantizedSymm8PerAxis:
            return arm_compute::DataType::QSYMM8_PER_CHANNEL;
        case armnn::DataType::Signed32:
            return arm_compute::DataType::S32;
        default:
            BOOST_ASSERT_MSG(false, "Unknown data type");
            return arm_compute::DataType::UNKNOWN;
    }
}

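// Translates a set of ArmNN reduction axes into ACL reduction coordinates. ArmNN and ACL index
// dimensions in opposite orders, and an empty axis list means reduction over every dimension.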
arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                             unsigned int originalInputRank,
                                                             const std::vector<unsigned int>& armnnAxes)
{
    arm_compute::Coordinates outAclCoords;

    if (armnnAxes.empty())
    {
        // If no reduction axes were provided, then the input must be reduced along all dimensions.
        // Since Compute Library does not accept an empty vector as the reduction dimensions, we then
        // manually create a vector including all the input dimensions (in reversed order) as:
        //
        // { inputDimensions - 1, inputDimensions - 2, ..., 1, 0 }
        //
        outAclCoords.set_num_dimensions(inputDimensions);
        std::generate(outAclCoords.begin(), outAclCoords.end(), [d = inputDimensions - 1] () mutable { return d--; });
    }
    else
    {
        // Create a vector of reduction dimensions (in reversed order) with the given reduction axes.
        //
        // Adjust the given reduction axes according to the original rank of the input tensor (before ACL applied any
        // dimension correction).
        // For example, if the input tensor originally had 4 dimensions, and one of the reduction axes was 2, then the
        // new value for that reduction axis should be 1.
        //
        // Example:
        // ArmNN input shape = { 1, 1, 3, 2 } -> ACL input shape = { 2, 3 }
        // ArmNN reduction axis = { 2 }       -> ACL reduction axis = { 1 }
        // ArmNN reduction axis = { 3 }       -> ACL reduction axis = { 0 }
        //
        // The transformation: ACL reduction axis index = original rank - ArmNN reduction axis index - 1
        //
        outAclCoords.set_num_dimensions(armnnAxes.size());
        std::transform(armnnAxes.begin(), armnnAxes.end(),
                       outAclCoords.begin(),
                       [originalInputRank](unsigned int i){ return originalInputRank - i - 1; });
    }

    return outAclCoords;
}

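// Converts an armnn::TensorShape to an arm_compute::TensorShape; the dimension order is reversed.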
arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape)
{
    arm_compute::TensorShape shape;

    // armnn tensors are (batch, channels, height, width).
    // arm_compute tensors are (width, height, channels, batch).
    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); i++)
    {
        // Note that our dimensions are stored in the opposite order to ACL's.
        shape.set(tensorShape.GetNumDimensions() - i - 1, tensorShape[i], false);

        // TensorShape::set() applies "dimension correction" by default, which trims trailing dimensions
        // of size 1 (the leading ArmNN dimensions, so a batch size of 1 would be lost). Passing false as
        // the third argument disables this so the full ArmNN rank is preserved.
    }

    // Prevent an arm_compute issue where a tensor would otherwise be flattened to zero dimensions.
    if (shape.num_dimensions() == 0)
    {
        shape.set_num_dimensions(1);
    }

    return shape;
}

// Utility function used to build a TensorInfo object that can be used to initialise
// ARM Compute Tensor and CLTensor allocators.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo)
{
    const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape());
    const arm_compute::DataType aclDataType       = GetArmComputeDataType(tensorInfo.GetDataType());

    const arm_compute::QuantizationInfo aclQuantizationInfo = tensorInfo.HasMultipleQuantizationScales() ?
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScales()) :
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScale(), tensorInfo.GetQuantizationOffset());

    return arm_compute::TensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo);
}

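// As above, but additionally sets the requested data layout (NCHW or NHWC) on the resulting TensorInfo.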
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    arm_compute::TensorInfo aclTensorInfo = BuildArmComputeTensorInfo(tensorInfo);
    aclTensorInfo.set_data_layout(ConvertDataLayout(dataLayout));

    return aclTensorInfo;
}

arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout)
{
    switch(dataLayout)
    {
        case armnn::DataLayout::NHWC: return arm_compute::DataLayout::NHWC;

        case armnn::DataLayout::NCHW: return arm_compute::DataLayout::NCHW;

        default: throw InvalidArgumentException("Unknown armnn::DataLayout: [" +
                                                std::to_string(static_cast<int>(dataLayout)) + "]");
    }
}

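// Builds an ACL PoolingLayerInfo from an ArmNN Pooling2dDescriptor. Zero strides in both dimensions
// select global pooling; fpMixedPrecision is passed through to ACL to request mixed-precision
// (wider) accumulation for FP16 pooling.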
arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
                                                              bool fpMixedPrecision)
{
    using arm_compute::PoolingType;
    using arm_compute::DimensionRoundingType;
    using arm_compute::PadStrideInfo;
    using arm_compute::PoolingLayerInfo;
    using arm_compute::Size2D;

    // Resolve ARM Compute layer parameters.
    const PoolingType poolingType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType);

    bool isGlobalPooling = (descriptor.m_StrideX == 0 && descriptor.m_StrideY == 0);
    // Use the specific constructor if global pooling.
    if (isGlobalPooling)
    {
        return arm_compute::PoolingLayerInfo(poolingType);
    }

    const DimensionRoundingType rounding = ConvertOutputShapeRoundingToAclDimensionRoundingType(
        descriptor.m_OutputShapeRounding);
    const PadStrideInfo padStrideInfo(descriptor.m_StrideX,
                                      descriptor.m_StrideY,
                                      descriptor.m_PadLeft,
                                      descriptor.m_PadRight,
                                      descriptor.m_PadTop,
                                      descriptor.m_PadBottom,
                                      rounding);

    const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);

    const Size2D poolSize(descriptor.m_PoolWidth, descriptor.m_PoolHeight);

    return arm_compute::PoolingLayerInfo(poolingType, poolSize, padStrideInfo, excludePadding, fpMixedPrecision);
}

arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& descriptor)
{
    const arm_compute::NormType normType =
        ConvertNormalizationAlgorithmChannelToAclNormType(descriptor.m_NormChannelType);
    return arm_compute::NormalizationLayerInfo(normType,
                                               descriptor.m_NormSize,
                                               descriptor.m_Alpha,
                                               descriptor.m_Beta,
                                               descriptor.m_K,
                                               false);
}

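// Builds an ACL PermutationVector from an ArmNN PermutationVector. Leading dimensions that map to
// themselves are skipped; for example, an ArmNN mapping of { 0, 2, 1 } becomes the ACL vector { 1, 0 }.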
arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& perm)
{
    arm_compute::PermutationVector aclPerm;

    unsigned int start = 0;
    while ((start < perm.GetSize()) && (start == perm[start]))
    {
        ++start;
    }

    for (unsigned int i = start; i < perm.GetSize(); ++i)
    {
        aclPerm.set(i - start, perm[i] - start);
    }

    return aclPerm;
}

arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height)
{
    return arm_compute::Size2D(width, height);
}

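// Wraps a float value in an arm_compute::PixelValue, converting it to the data type of the given tensor.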
arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue)
{
    switch (input.info()->data_type())
    {
        case arm_compute::DataType::F16:
            return arm_compute::PixelValue(static_cast<Half>(pixelValue));
        case arm_compute::DataType::F32:
            return arm_compute::PixelValue(pixelValue);
        case arm_compute::DataType::QASYMM8:
            return arm_compute::PixelValue(static_cast<uint8_t>(pixelValue));
        case arm_compute::DataType::QSYMM16:
            return arm_compute::PixelValue(static_cast<int16_t>(pixelValue));
        case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            return arm_compute::PixelValue(static_cast<int8_t>(pixelValue));
        default:
            throw InvalidArgumentException("Unsupported DataType: [" +
                                           std::to_string(static_cast<int>(input.info()->data_type())) + "]");
    }
}

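// Returns true if every effective requantization multiplier, (inputScale * weightScale) / outputScale,
// is no greater than 1.0, which is the maximum multiplier supported here.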
bool IsQuantMultiplierSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const TensorInfo& weights)
{
    constexpr float maxQuantMultiplier = 1.0f;
    if (weights.HasMultipleQuantizationScales())
    {
        for (float weightScale : weights.GetQuantizationScales())
        {
            if ((input.GetQuantizationScale() * weightScale) / output.GetQuantizationScale() > maxQuantMultiplier)
            {
                return false;
            }
        }
    }
    else
    {
        if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) /
             output.GetQuantizationScale() > maxQuantMultiplier)
        {
            return false;
        }
    }

    return true;
}

} // namespace armcomputetensorutils
} // namespace armnn