//
// Copyright © 2017-2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnnUtils/TensorUtils.hpp>

#include <armnn/Exceptions.hpp>

#include <armnn/backends/ITensorHandle.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <fmt/format.h>

using namespace armnn;

namespace armnnUtils
{

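// Builds a 4D TensorShape from batch, channel, height and width values, ordering the
// dimensions according to the requested data layout (NCHW or NHWC).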
TensorShape GetTensorShape(unsigned int numberOfBatches,
                           unsigned int numberOfChannels,
                           unsigned int height,
                           unsigned int width,
                           const DataLayout dataLayout)
{
    switch (dataLayout)
    {
        case DataLayout::NCHW:
            return TensorShape({numberOfBatches, numberOfChannels, height, width});
        case DataLayout::NHWC:
            return TensorShape({numberOfBatches, height, width, numberOfChannels});
        default:
            throw InvalidArgumentException("Unknown data layout ["
                                           + std::to_string(static_cast<int>(dataLayout)) +
                                           "]", CHECK_LOCATION());
    }
}

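// As GetTensorShape, but also attaches the element data type to produce a TensorInfo.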
TensorInfo GetTensorInfo(unsigned int numberOfBatches,
                         unsigned int numberOfChannels,
                         unsigned int height,
                         unsigned int width,
                         const DataLayout dataLayout,
                         const DataType dataType)
{
    switch (dataLayout)
    {
        case DataLayout::NCHW:
            return TensorInfo({numberOfBatches, numberOfChannels, height, width}, dataType);
        case DataLayout::NHWC:
            return TensorInfo({numberOfBatches, height, width, numberOfChannels}, dataType);
        default:
            throw InvalidArgumentException("Unknown data layout ["
                                           + std::to_string(static_cast<int>(dataLayout)) +
                                           "]", CHECK_LOCATION());
    }
}

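// 5D overload for volumetric tensors: orders batches, channels, depth, height and width
// according to the NDHWC or NCDHW data layout.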
TensorInfo GetTensorInfo(unsigned int numberOfBatches,
                         unsigned int numberOfChannels,
                         unsigned int depth,
                         unsigned int height,
                         unsigned int width,
                         const DataLayout dataLayout,
                         const DataType dataType)
{
    switch (dataLayout)
    {
        case DataLayout::NDHWC:
            return TensorInfo({numberOfBatches, depth, height, width, numberOfChannels}, dataType);
        case DataLayout::NCDHW:
            return TensorInfo({numberOfBatches, numberOfChannels, depth, height, width}, dataType);
        default:
            throw InvalidArgumentException("Unknown data layout ["
                                           + std::to_string(static_cast<int>(dataLayout)) +
                                           "]", CHECK_LOCATION());
    }
}

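// Maps a tensor handle as float data and returns the smallest and largest values it contains.
// Assumes the tensor holds at least one element and that its contents are float32.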
std::pair<float, float> FindMinMax(ITensorHandle* tensorHandle)
{
    auto tensor_data = static_cast<const float*>(tensorHandle->Map(true));
    auto tensor_size = tensorHandle->GetShape().GetNumElements();

    // Set min/max initially to first value in tensor
    float min = tensor_data[0];
    float max = tensor_data[0];

    // Loop over rest of tensor and update min/max if necessary
    for (unsigned int val = 1; val < tensor_size; val++)
    {
        if (tensor_data[val] < min)
        {
            min = tensor_data[val];
        }
        else if (tensor_data[val] > max)
        {
            max = tensor_data[val];
        }
    }

    tensorHandle->Unmap();

    return std::make_pair(min, max);
}

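// Attempts to reduce a shape to at most 'dimensions' dimensions by dropping leading 1-sized
// dimensions. Once a dimension has been kept, no further dimensions are skipped, so if there
// are not enough leading 1s the result may still exceed the requested dimension count.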
TensorShape ReduceDims(const TensorShape& tensorShape, unsigned int dimensions)
{
    if (tensorShape.GetNumDimensions() <= dimensions)
    {
        return tensorShape;
    }
    std::vector<unsigned int> newShape;

    unsigned int dimsToSkip = tensorShape.GetNumDimensions() - dimensions;
    unsigned int dimsSkipped = 0;
    bool insertRemainder = false;

    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        if (tensorShape[i] == 1 && dimsSkipped < dimsToSkip && !insertRemainder)
        {
            ++dimsSkipped;
            continue;
        }
        newShape.push_back(tensorShape[i]);
        // Once we insert the first dimension we can't skip any more
        insertRemainder = true;
    }
    return TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data());
}

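// TensorInfo overload: reduces the shape and keeps all other tensor metadata unchanged.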
TensorInfo ReduceDims(const TensorInfo& tensorInfo, unsigned int dimensions)
{
    TensorInfo strippedTensor(tensorInfo);
    TensorShape strippedShape = ReduceDims(tensorInfo.GetShape(), dimensions);
    strippedTensor.SetShape(strippedShape);
    return strippedTensor;
}

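// Inserts a dimension of size 1 at the given axis; negative axes count back from the end of
// the expanded shape. Throws if the axis is outside the valid range for the expanded rank.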
TensorShape ExpandDims(const TensorShape& tensorShape, int axis)
{
    unsigned int outputDim = tensorShape.GetNumDimensions() + 1;

    if (axis < -armnn::numeric_cast<int>(outputDim) || axis > armnn::numeric_cast<int>(tensorShape.GetNumDimensions()))
    {
        throw InvalidArgumentException(fmt::format("Invalid expansion axis {} for {}D input tensor. {}",
                                                   axis,
                                                   tensorShape.GetNumDimensions(),
                                                   CHECK_LOCATION().AsString()));
    }

    if (axis < 0)
    {
        axis = armnn::numeric_cast<int>(outputDim) + axis;
    }

    std::vector<unsigned int> outputShape;
    outputShape.reserve(tensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        outputShape.push_back(tensorShape[i]);
    }
    outputShape.insert(outputShape.begin() + axis, 1);

    return { outputDim, outputShape.data() };
}

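// Prepends dimensions of size 1 until the shape has the requested rank; returns the shape
// unchanged if its rank already equals or exceeds 'rank'.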
TensorShape ExpandDimsToRank(const TensorShape& tensorShape, unsigned int rank)
{
    // Can't expand if rank is smaller than current shape
    if (tensorShape.GetNumDimensions() >= rank)
    {
        return tensorShape;
    }

    std::vector<unsigned int> newShape;

    // First add 1s to the beginning of the shape to fill in the space
    for (unsigned int i = 0; i < rank - tensorShape.GetNumDimensions(); ++i)
    {
        newShape.push_back(1);
    }

    // Then iterate through the original shape and append it to the new shape after the added 1s
    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        newShape.push_back(tensorShape[i]);
    }

    return TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data());
}

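// Returns the dimension sizes with all 1-sized dimensions removed; the result is empty when
// every dimension of the shape is 1.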
std::vector<unsigned int> SqueezeDims(const TensorShape& tensorShape)
{
    std::vector<unsigned int> squeezedDims;

    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
    {
        if (tensorShape[i] != 1)
        {
            squeezedDims.push_back(tensorShape[i]);
        }
    }
    return squeezedDims;
}

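// Returns the product of the dimensions in the half-open range [firstAxisInclusive, lastAxisExclusive).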
unsigned int GetNumElementsBetween(const TensorShape& shape,
                                   const unsigned int firstAxisInclusive,
                                   const unsigned int lastAxisExclusive)
{
    if (firstAxisInclusive > lastAxisExclusive)
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "GetNumElementsBetween: firstAxisInclusive [{}D] is greater than lastAxisExclusive [{}D]",
            firstAxisInclusive,
            lastAxisExclusive));
    }
    if (lastAxisExclusive > shape.GetNumDimensions())
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "{}: lastAxisExclusive [{}D] is greater than the number of dimensions of the tensor shape [{}D]",
            "GetNumElementsBetween",
            lastAxisExclusive,
            shape.GetNumDimensions()));
    }
    unsigned int count = 1;
    for (unsigned int i = firstAxisInclusive; i < lastAxisExclusive; i++)
    {
        count *= shape[i];
    }
    return count;
}

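// Converts a signed axis index, which may be negative and count back from the last dimension,
// into an unsigned index into a tensor of 'inputDimension' dimensions.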
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
{
    if (axis >= armnn::numeric_cast<int>(inputDimension))
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "{}: axis index [{}] is not less than the number of dimensions [{}D]",
            "GetUnsignedAxis",
            axis,
            inputDimension));
    }
    if (axis < -armnn::numeric_cast<int>(inputDimension))
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "{}: axis index [{}] is lower than the negative of the number of dimensions [{}]",
            "GetUnsignedAxis",
            axis,
            -armnn::numeric_cast<int>(inputDimension)));
    }

    unsigned int uAxis = axis < 0 ?
                         inputDimension - armnn::numeric_cast<unsigned int>(abs(axis))
                         : armnn::numeric_cast<unsigned int>(axis);
    return uAxis;
}

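// Returns the number of elements spanned by the dimensions that follow 'axis' (exclusive).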
unsigned int GetNumElementsAfter(const armnn::TensorShape& shape, unsigned int axis)
{
    unsigned int numDim = shape.GetNumDimensions();
    if (axis >= numDim)
    {
        throw armnn::InvalidArgumentException(fmt::format(
            "{}: axis index [{}D] indexes beyond the number of dimensions of the tensor shape [{}D]",
            "GetNumElementsAfter",
            axis,
            numDim));
    }
    unsigned int count = 1;
    for (unsigned int i = axis + 1; i < numDim; i++)
    {
        count *= shape[i];
    }
    return count;
}

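// Returns the per-axis quantization scales together with the axis factor: the number of
// consecutive elements that share one scale before the scale index advances.
// Throws if the tensor does not carry per-axis quantization parameters.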
std::pair<unsigned int, std::vector<float>> GetPerAxisParams(const armnn::TensorInfo& info)
{
    const std::vector<float>& scales = info.GetQuantizationScales();
    armnn::Optional<unsigned int> quantizationDim = info.GetQuantizationDim();
    if (!info.HasPerAxisQuantization())
    {
        throw armnn::InvalidArgumentException(
            std::string("Per-axis quantization params not set for tensor of type ") +
            armnn::GetDataTypeName(info.GetDataType()), CHECK_LOCATION());
    }
    unsigned int axisFactor = GetNumElementsAfter(info.GetShape(), quantizationDim.value());

    return { axisFactor, scales };
}

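// Verifies that 'data' holds one entry per tensor element (or 'size' entries per element,
// e.g. 4 bytes per Signed32 element when the data is a raw byte buffer).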
template<typename PrimitiveType>
void CheckSizes(const std::vector<PrimitiveType>& data, const armnn::TensorInfo& tensorInfo, unsigned int size = 1)
{
    if (data.size() / size != tensorInfo.GetNumElements())
    {
        throw InvalidArgumentException(
            fmt::format("The data does not contain the expected number of elements {} != {}. {}",
                        data.size(), tensorInfo.GetNumElements(), CHECK_LOCATION().AsString()));
    }
}

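// Dequantizes a typed buffer to float32, using per-axis scales when the TensorInfo carries
// them and the per-tensor scale/offset otherwise.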
template<typename PrimitiveType>
std::unique_ptr<float[]> ToFloatArray(const std::vector<PrimitiveType>& data, const armnn::TensorInfo& tensorInfo)
{
    CheckSizes(data, tensorInfo);

    std::unique_ptr<float[]> returnBuffer(new float[tensorInfo.GetNumElements()]);

    if (tensorInfo.HasPerAxisQuantization())
    {
        unsigned int axis = tensorInfo.GetQuantizationDim().value();
        auto axisDimensionality = tensorInfo.GetShape()[axis];
        auto axisFactor = armnnUtils::GetNumElementsAfter(tensorInfo.GetShape(), axis);

        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            unsigned int axisIndex;

            if (i < axisFactor)
            {
                axisIndex = 0;
            }
            else
            {
                axisIndex = (i / axisFactor) % axisDimensionality;
            }
            returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
                                                        tensorInfo.GetQuantizationScales()[axisIndex],
                                                        tensorInfo.GetQuantizationOffset());
        }
    }
    else
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            returnBuffer[i] = Dequantize<PrimitiveType>(data[i],
                                                        tensorInfo.GetQuantizationScale(),
                                                        tensorInfo.GetQuantizationOffset());
        }
    }
    return returnBuffer;
}

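// Raw-byte overload: reinterprets the byte buffer according to the TensorInfo's data type
// before dequantizing. Supports QAsymmS8, QSymmS8, QAsymmU8, Signed32 and Signed64 tensors.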
std::unique_ptr<float[]> ToFloatArray(const std::vector<uint8_t>& data, const armnn::TensorInfo& tensorInfo)
{
    if (tensorInfo.GetDataType() == DataType::QAsymmS8 || tensorInfo.GetDataType() == DataType::QSymmS8)
    {
        CheckSizes(data, tensorInfo);
        std::vector<int8_t> buffer(tensorInfo.GetNumElements());
        ::memcpy(buffer.data(), data.data(), data.size());
        return ToFloatArray<int8_t>(buffer, tensorInfo);
    }
    else if (tensorInfo.GetDataType() == DataType::QAsymmU8)
    {
        CheckSizes(data, tensorInfo);
        return ToFloatArray<uint8_t>(data, tensorInfo);
    }
    else if (tensorInfo.GetDataType() == DataType::Signed32)
    {
        CheckSizes(data, tensorInfo, 4);
        std::vector<int32_t> buffer(tensorInfo.GetNumElements());
        ::memcpy(buffer.data(), data.data(), data.size());
        return ToFloatArray<int32_t>(buffer, tensorInfo);
    }
    else if (tensorInfo.GetDataType() == DataType::Signed64)
    {
        CheckSizes(data, tensorInfo, 8);
        std::vector<int64_t> buffer(tensorInfo.GetNumElements());
        ::memcpy(buffer.data(), data.data(), data.size());
        return ToFloatArray<int64_t>(buffer, tensorInfo);
    }
    throw InvalidArgumentException(
        fmt::format("Unsupported datatype {}. {}",
                    GetDataTypeName(tensorInfo.GetDataType()),
                    CHECK_LOCATION().AsString()));
}

} // namespace armnnUtils