//
// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include "armnn/Exceptions.hpp"
#include "ArmComputeUtils.hpp"
#include <armnn/Descriptors.hpp>

#include <fmt/format.h>

namespace armnn
{
namespace armcomputetensorutils
{
18
Derek Lambertid466a542020-01-22 15:37:29 +000019arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales)
telsoa014fcda012018-03-09 14:13:49 +000020{
21 switch(dataType)
22 {
Narumol Prangnawarat250d3922020-03-30 16:11:04 +010023 case armnn::DataType::BFloat16:
24 return arm_compute::DataType::BFLOAT16;
Mike Kelly130ec602019-11-08 12:08:35 +000025 case armnn::DataType::Boolean:
26 return arm_compute::DataType::U8;
telsoa01c577f2c2018-08-31 09:22:23 +010027 case armnn::DataType::Float16:
28 return arm_compute::DataType::F16;
telsoa014fcda012018-03-09 14:13:49 +000029 case armnn::DataType::Float32:
telsoa014fcda012018-03-09 14:13:49 +000030 return arm_compute::DataType::F32;
Ryan OShea9add1202020-02-07 10:06:33 +000031 case armnn::DataType::QAsymmS8:
32 return arm_compute::DataType::QASYMM8_SIGNED;
Derek Lambertif90c56d2020-01-10 17:14:08 +000033 case armnn::DataType::QAsymmU8:
telsoa014fcda012018-03-09 14:13:49 +000034 return arm_compute::DataType::QASYMM8;
Derek Lambertif90c56d2020-01-10 17:14:08 +000035 case armnn::DataType::QSymmS16:
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +010036 return arm_compute::DataType::QSYMM16;
Inki Daed4619e22020-09-10 15:33:54 +090037 case armnn::DataType::Signed64:
38 return arm_compute::DataType::S64;
Finn Williamsfd271062019-12-04 14:27:27 +000039 case armnn::DataType::QSymmS8:
Derek Lambertid466a542020-01-22 15:37:29 +000040 {
41 return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8;
42 }
telsoa014fcda012018-03-09 14:13:49 +000043 case armnn::DataType::Signed32:
telsoa014fcda012018-03-09 14:13:49 +000044 return arm_compute::DataType::S32;
telsoa014fcda012018-03-09 14:13:49 +000045 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010046 ARMNN_ASSERT_MSG(false, "Unknown data type");
telsoa014fcda012018-03-09 14:13:49 +000047 return arm_compute::DataType::UNKNOWN;
telsoa014fcda012018-03-09 14:13:49 +000048 }
49}
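
// Illustrative usage sketch (not part of the original source): mapping a per-channel
// quantised Arm NN type to its ACL counterpart.
//   arm_compute::DataType aclType = GetArmComputeDataType(armnn::DataType::QSymmS8, /*multiScales=*/true);
//   // aclType == arm_compute::DataType::QSYMM8_PER_CHANNEL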

armnn::DataType GetArmNNDataType(arm_compute::DataType dataType)
{
    switch(dataType)
    {
        case arm_compute::DataType::BFLOAT16:
            return armnn::DataType::BFloat16;
        case arm_compute::DataType::U8:
            return armnn::DataType::Boolean;
        case arm_compute::DataType::F16:
            return armnn::DataType::Float16;
        case arm_compute::DataType::F32:
            return armnn::DataType::Float32;
        case arm_compute::DataType::QASYMM8_SIGNED:
            return armnn::DataType::QAsymmS8;
        case arm_compute::DataType::QASYMM8:
            return armnn::DataType::QAsymmU8;
        case arm_compute::DataType::QSYMM16:
            return armnn::DataType::QSymmS16;
        case arm_compute::DataType::S64:
            return armnn::DataType::Signed64;
        case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            return armnn::DataType::QSymmS8;
        case arm_compute::DataType::QSYMM8:
            return armnn::DataType::QSymmS8;
        case arm_compute::DataType::S32:
            return armnn::DataType::Signed32;
        default:
            ARMNN_ASSERT_MSG(false, "Unknown data type");
            return armnn::DataType::Float32;
    }
}
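
// Note (added for clarity, not part of the original source): this mapping is not a strict inverse of
// GetArmComputeDataType above. For example, both QSYMM8 and QSYMM8_PER_CHANNEL map back to
// armnn::DataType::QSymmS8, and U8 is assumed to carry Boolean data.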

arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                             unsigned int originalInputRank,
                                                             const std::vector<unsigned int>& armnnAxes)
{
    arm_compute::Coordinates outAclCoords;

    if (armnnAxes.empty())
    {
        // If no reduction axes were provided, then the input must be reduced along all dimensions.
        // Since Compute Library does not accept an empty vector as the reduction dimensions, we then
        // manually create a vector including all the input dimensions (in reversed order) as:
        //
        // { inputDimensions - 1, inputDimensions - 2, ..., 1, 0 }
        //
        outAclCoords.set_num_dimensions(inputDimensions);
        std::generate(outAclCoords.begin(), outAclCoords.end(), [d = inputDimensions - 1] () mutable { return d--; });
    }
    else
    {
        // Create a vector of reduction dimensions (in reversed order) with the given reduction axes.
        //
        // Adjust the given reduction axes according to the original rank of the input tensor (before ACL applied any
        // dimension correction).
        // For example, if the input tensor originally had 4 dimensions, and one of the reduction axes was 2, then the
        // new value for that reduction axis should be 1.
        //
        // Example:
        // ArmNN input shape = { 1, 1, 3, 2 } -> ACL input shape = { 2, 3 }
        // ArmNN reduction axis = { 2 } -> ACL reduction axis = { 1 }
        // ArmNN reduction axis = { 3 } -> ACL reduction axis = { 0 }
        //
        // The transformation: ACL reduction axis index = original rank - ArmNN reduction axis index - 1
        //
        outAclCoords.set_num_dimensions(armnnAxes.size());
        std::transform(armnnAxes.begin(), armnnAxes.end(),
                       outAclCoords.begin(),
                       [originalInputRank](unsigned int i){ return originalInputRank - i - 1; });
    }

    return outAclCoords;
}
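
// Illustrative usage sketch (not part of the original source), matching the worked example above:
//   // Arm NN shape { 1, 1, 3, 2 } (rank 4), reduced over Arm NN axis 2, ACL input rank 2:
//   arm_compute::Coordinates coords = BuildArmComputeReductionCoordinates(2, 4, { 2 });
//   // coords contains a single entry: 4 - 2 - 1 = 1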

arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape)
{
    arm_compute::TensorShape shape;

    // armnn tensors are (batch, channels, height, width).
    // arm_compute tensors are (width, height, channels, batch).
    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); i++)
    {
        // Note that our dimensions are stored in the opposite order to ACL's.
        shape.set(tensorShape.GetNumDimensions() - i - 1, tensorShape[i], false);

        // TensorShape::set() flattens leading ones, so that batch size 1 cannot happen.
        // arm_compute tensors expect this.
    }

    // Prevent arm_compute issue where a tensor is flattened to nothing.
    if (shape.num_dimensions() == 0)
    {
        shape.set_num_dimensions(1);
    }

    return shape;
}
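
// Illustrative example (not part of the original source) of the dimension reversal:
//   // Arm NN shape { 1, 10, 20, 30 } (N, C, H, W) becomes ACL shape { 30, 20, 10, 1 } (W, H, C, N).
//   arm_compute::TensorShape aclShape = BuildArmComputeTensorShape(armnn::TensorShape({ 1, 10, 20, 30 }));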

std::vector<unsigned int> ReduceDimsForACL(const armnn::TensorShape tensorShape, unsigned int dimensions)
{
    std::vector<unsigned int> newShape;

    unsigned int dimsToSkip = 0;

    if (tensorShape.GetNumDimensions() > dimensions)
    {
        dimsToSkip = tensorShape.GetNumDimensions() - dimensions;
    }
    unsigned int dimsSkipped = 0;
    bool insertRemainder = false;

    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        if (tensorShape[i] == 1 && dimsSkipped < dimsToSkip && !insertRemainder)
        {
            ++dimsSkipped;
            continue;
        }
        newShape.insert(newShape.begin(), tensorShape[i]);
        // Once we insert the first dimension we can't skip any more.
        insertRemainder = true;
    }
    return newShape;
}
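
// Illustrative trace (not part of the original source): reducing { 1, 1, 3, 2 } to at most two
// dimensions skips the two leading 1s and returns the remaining dimensions in reversed (ACL)
// order, i.e. { 2, 3 }.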

arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape, unsigned int dimensions)
{
    arm_compute::TensorShape shape;
    std::vector<unsigned int> strippedShape = ReduceDimsForACL(tensorShape, dimensions);

    for (unsigned int i = 0; i < strippedShape.size(); i++)
    {
        shape.set(i, strippedShape[i], false);
    }

    // Prevent arm_compute issue where a tensor is flattened to nothing.
    if (shape.num_dimensions() == 0)
    {
        shape.set_num_dimensions(1);
    }
    return shape;
}
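
// Illustrative usage sketch (not part of the original source):
//   // Squeeze the Arm NN shape { 1, 1, 3, 2 } into a 2D ACL shape of { 2, 3 }.
//   arm_compute::TensorShape aclShape = BuildArmComputeTensorShape(armnn::TensorShape({ 1, 1, 3, 2 }), 2);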

// Utility function used to build a TensorInfo object that can be used to initialise
// ARM Compute Tensor and CLTensor allocators.
// Note: this utility ignores the value of armnn::TensorInfo.IsConstant(). ACL tensors
// default to constant but Arm NN ones default to non constant. In the cases where
// we expect ACL to treat a tensor as constant, that value must be set after this
// utility has been called.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo)
{
    bool multiScales = tensorInfo.HasMultipleQuantizationScales();
    const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape());
    const arm_compute::DataType aclDataType = GetArmComputeDataType(tensorInfo.GetDataType(), multiScales);

    const arm_compute::QuantizationInfo aclQuantizationInfo = multiScales ?
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScales()) :
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScale(), tensorInfo.GetQuantizationOffset());

    return arm_compute::TensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo);
}
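
// Illustrative usage sketch (not part of the original source): building the ACL TensorInfo for a
// per-tensor quantised QAsymmU8 tensor. If ACL is expected to treat the tensor as constant, that
// flag must still be set on the result afterwards (see the note above).
//   armnn::TensorInfo info(armnn::TensorShape({ 1, 16, 16, 8 }), armnn::DataType::QAsymmU8, 0.5f, 128);
//   arm_compute::TensorInfo aclInfo = BuildArmComputeTensorInfo(info);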

arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    arm_compute::TensorInfo aclTensorInfo = BuildArmComputeTensorInfo(tensorInfo);
    aclTensorInfo.set_data_layout(ConvertDataLayout(dataLayout));

    return aclTensorInfo;
}

arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo, unsigned int dimensions)
{
    bool multiScales = tensorInfo.HasMultipleQuantizationScales();
    const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape(), dimensions);
    const arm_compute::DataType aclDataType = GetArmComputeDataType(tensorInfo.GetDataType(), multiScales);

    const arm_compute::QuantizationInfo aclQuantizationInfo = multiScales ?
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScales()) :
        arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScale(), tensorInfo.GetQuantizationOffset());

    return arm_compute::TensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo);
}

arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout, unsigned int dimensions)
{
    arm_compute::TensorInfo aclTensorInfo = BuildArmComputeTensorInfo(tensorInfo, dimensions);
    aclTensorInfo.set_data_layout(ConvertDataLayout(dataLayout));

    return aclTensorInfo;
}

arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout)
{
    switch(dataLayout)
    {
        case armnn::DataLayout::NHWC : return arm_compute::DataLayout::NHWC;

        case armnn::DataLayout::NCHW : return arm_compute::DataLayout::NCHW;

        case armnn::DataLayout::NDHWC : return arm_compute::DataLayout::NDHWC;

        case armnn::DataLayout::NCDHW : return arm_compute::DataLayout::NCDHW;

        default: throw InvalidArgumentException("Unknown armnn::DataLayout: [" +
                                                std::to_string(static_cast<int>(dataLayout)) + "]");
    }
}

arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
                                                              bool fpMixedPrecision)
{
    // Resolve ARM Compute layer parameters.
    const arm_compute::PoolingType poolingType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType);

    const arm_compute::DataLayout dataLayout = ConvertDataLayout(descriptor.m_DataLayout);

    bool isGlobalPooling = (descriptor.m_StrideX == 0 && descriptor.m_StrideY == 0);
    // Use the specific constructor if global pooling.
    if (isGlobalPooling)
    {
        return arm_compute::PoolingLayerInfo(poolingType, dataLayout);
    }

    const arm_compute::DimensionRoundingType rounding = ConvertOutputShapeRoundingToAclDimensionRoundingType(
        descriptor.m_OutputShapeRounding);
    const arm_compute::PadStrideInfo padStrideInfo(descriptor.m_StrideX,
                                                   descriptor.m_StrideY,
                                                   descriptor.m_PadLeft,
                                                   descriptor.m_PadRight,
                                                   descriptor.m_PadTop,
                                                   descriptor.m_PadBottom,
                                                   rounding);

    const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);

    const arm_compute::Size2D poolSize(descriptor.m_PoolWidth, descriptor.m_PoolHeight);

    return arm_compute::PoolingLayerInfo(poolingType, poolSize, dataLayout, padStrideInfo, excludePadding,
                                         fpMixedPrecision);
}

arm_compute::Pooling3dLayerInfo BuildArmComputePooling3dLayerInfo(const Pooling3dDescriptor& descriptor,
                                                                  bool fpMixedPrecision)
{
    const arm_compute::PoolingType poolingType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType);

    bool isGlobalPooling = (descriptor.m_StrideX == 0 && descriptor.m_StrideY == 0 && descriptor.m_StrideZ == 0);
    // Use the specific constructor if global pooling.
    if (isGlobalPooling)
    {
        return arm_compute::Pooling3dLayerInfo(poolingType);
    }

    const arm_compute::Size3D poolSize(descriptor.m_PoolWidth, descriptor.m_PoolHeight, descriptor.m_PoolDepth);

    const arm_compute::Size3D stride(descriptor.m_StrideX,
                                     descriptor.m_StrideY,
                                     descriptor.m_StrideZ);

    const arm_compute::Padding3D padding(descriptor.m_PadLeft,
                                         descriptor.m_PadRight,
                                         descriptor.m_PadTop,
                                         descriptor.m_PadBottom,
                                         descriptor.m_PadFront,
                                         descriptor.m_PadBack);

    const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);

    const arm_compute::DimensionRoundingType rounding = ConvertOutputShapeRoundingToAclDimensionRoundingType(
        descriptor.m_OutputShapeRounding);

    return arm_compute::Pooling3dLayerInfo(poolingType,
                                           poolSize,
                                           stride,
                                           padding,
                                           excludePadding,
                                           fpMixedPrecision,
                                           rounding);
}

arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& descriptor)
{
    const arm_compute::NormType normType =
        ConvertNormalizationAlgorithmChannelToAclNormType(descriptor.m_NormChannelType);
    return arm_compute::NormalizationLayerInfo(normType,
                                               descriptor.m_NormSize,
                                               descriptor.m_Alpha,
                                               descriptor.m_Beta,
                                               descriptor.m_K,
                                               false);
}

arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& perm)
{
    arm_compute::PermutationVector aclPerm;

    unsigned int start = 0;
    while ((start < perm.GetSize()) && (start == perm[start]))
    {
        ++start;
    }

    for (unsigned int i = start; i < perm.GetSize(); ++i)
    {
        aclPerm.set(i - start, perm[i] - start);
    }
    return aclPerm;
}
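
// Illustrative trace (not part of the original source): for perm = { 0, 2, 1 } the leading identity
// entry is skipped (start == 1) and the remaining entries are rebased, giving an ACL
// PermutationVector of { 1, 0 }.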

arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& perm)
{
    // As ArmNN indexes are left to right and ACL indexes are right to left,
    // the permutation vector has to be reversed and then translated into ACL axes.
    // i.e. {1, 0, 2, 3} --> {3, 2, 0, 1} --> {0, 1, 3, 2}

    // Below is an example of how the ArmNN and ACL index formats work:
    // ArmNN Format:
    // Input Shape {1, 10, 20, 30}
    // Permutation Vector {1, 0, 2, 3}
    // Output Shape {10, 1, 20, 30}
    // dim "1" of input goes into index 0 of the output ([ 10, X, X, X ])
    // dim "0" of input goes into index 1 of the output ([ 10, 1, X, X ])
    // dim "2" of input goes into index 2 of the output ([ 10, 1, 20, X ])
    // dim "3" of input goes into index 3 of the output ([ 10, 1, 20, 30 ])
    // ACL Format:
    // Input Shape {30, 20, 10, 1}
    // Permutation Vector {0, 1, 3, 2}
    // Output Shape {30, 20, 1, 10}
    // dim "0" of input goes into index 0 of the output ([ 30, X, X, X ])
    // dim "1" of input goes into index 1 of the output ([ 30, 20, X, X ])
    // dim "3" of input goes into index 2 of the output ([ 30, 20, 1, X ])
    // dim "2" of input goes into index 3 of the output ([ 30, 20, 1, 10 ])

    arm_compute::PermutationVector aclPerm;
    auto rank = perm.GetSize();

    // Reverse the order. i.e. {1, 0, 2, 3} --> {3, 2, 0, 1}
    std::vector<unsigned int> reversedPerm;
    reversedPerm.reserve(rank);
    for (unsigned int i = rank; i > 0; --i)
    {
        reversedPerm.push_back(perm[i-1]);
    }

    // Translate from Arm NN axis to ACL axis. i.e. {3, 2, 0, 1} --> {0, 1, 3, 2}
    for (unsigned int i = 0; i < rank; ++i)
    {
        auto aclAxis = rank - 1 - reversedPerm[i];
        aclPerm.set(i, aclAxis);
    }
    return aclPerm;
}

arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height)
{
    return arm_compute::Size2D(width, height);
}

arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float value)
{
    switch (tensorInfo->data_type())
    {
        case arm_compute::DataType::F16:
        {
            arm_compute::PixelValue pixelValue = arm_compute::PixelValue(static_cast<Half>(value));
            if (isinf(pixelValue.get<Half>()))
            {
                throw InvalidArgumentException("Under/Overflow converting float value [" + std::to_string(value) +
                                               "] to fp16: [" + std::to_string(pixelValue.get<Half>()) + "]");
            }
            return pixelValue;
        }
        case arm_compute::DataType::F32:
            return arm_compute::PixelValue(value);
        case arm_compute::DataType::QASYMM8:
            return arm_compute::PixelValue(static_cast<uint8_t>(value));
        case arm_compute::DataType::QSYMM16:
            return arm_compute::PixelValue(static_cast<int16_t>(value));
        case arm_compute::DataType::QSYMM8:
        case arm_compute::DataType::QASYMM8_SIGNED:
        case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            return arm_compute::PixelValue(static_cast<int8_t>(value));
        case arm_compute::DataType::S32:
            return arm_compute::PixelValue(static_cast<int32_t>(value));
        default:
            throw InvalidArgumentException("Unsupported DataType: [" +
                                           std::to_string(static_cast<int>(tensorInfo->data_type())) + "]");
    }
}
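
// Illustrative usage sketch (not part of the original source): obtaining a padding value for a
// tensor described by an ACL TensorInfo (here assumed to be QASYMM8, so the float is cast to uint8_t):
//   arm_compute::PixelValue padValue = GetPixelValue(&aclInfo, 0.0f);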

unsigned int ComputeDepthwiseConv2dDepthMultiplier(armnn::DataLayout layout,
                                                   const arm_compute::TensorShape& weightsShape,
                                                   const arm_compute::TensorShape& inputShape)
{
    unsigned int depthMultiplier;
    if (layout == armnn::DataLayout::NHWC)
    {
        depthMultiplier = static_cast<uint32_t>(weightsShape[0]) / static_cast<uint32_t>(inputShape[0]);
    }
    else if (layout == armnn::DataLayout::NCHW)
    {
        depthMultiplier = static_cast<uint32_t>(weightsShape[2]) / static_cast<uint32_t>(inputShape[2]);
    }
    else
    {
        throw InvalidArgumentException(fmt::format("Unknown data layout for tensor conversion: {}",
                                                   GetDataLayoutName(layout)));
    }
    return depthMultiplier;
}
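
// Illustrative example (not part of the original source): with NHWC data the ACL channel dimension
// is at shape index 0, so weights with 32 channels over an input with 16 channels yield a depth
// multiplier of 2.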
462
telsoa014fcda012018-03-09 14:13:49 +0000463} // namespace armcomputetensorutils
464} // namespace armnn