//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/Exceptions.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>

#include <Half.hpp>

namespace armnn
{
class ITensorHandle;

namespace armcomputetensorutils
{

/// Utility function to map an armnn::DataType to the corresponding arm_compute::DataType.
arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales);

/// Utility function to map an arm_compute::DataType to the corresponding armnn::DataType.
armnn::DataType GetArmNNDataType(arm_compute::DataType datatype);
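// Illustrative usage (a minimal sketch of the expected mapping; the multiScales flag selects
// per-channel (multi-scale) quantized ACL types where applicable):
//   arm_compute::DataType aclType = GetArmComputeDataType(armnn::DataType::Float32, false);
//   // aclType is expected to be arm_compute::DataType::F32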

/// Utility function used to set up an arm_compute::Coordinates from a vector of ArmNN Axes for reduction functions.
arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                             unsigned int originalInputRank,
                                                             const std::vector<unsigned int>& armnnAxes);

/// Utility function used to setup an arm_compute::TensorShape object from an armnn::TensorShape.
arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape);

/// Utility function used to setup an arm_compute::TensorShape object from an armnn::TensorShape. This will
/// attempt to reduce the number of leading 1s until the dimension length is equal to the dimensions passed in.
arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape, unsigned int dimensions);
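// Note on dimension ordering (a sketch of the expected behaviour, inferred from GetTensorShape below,
// which reverses the order again): ACL stores the fastest-changing dimension first, so an Arm NN shape
// of { 1, 2, 3, 4 } is expected to produce an ACL TensorShape of (4, 3, 2, 1). With the 'dimensions'
// overload, e.g. dimensions == 3, the leading 1 would be dropped before the conversion:
//   arm_compute::TensorShape aclShape = BuildArmComputeTensorShape(armnn::TensorShape({ 1, 2, 3, 4 }), 3);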

/// Utility function used to setup an arm_compute::ITensorInfo object whose dimensions are based on the given
/// armnn::ITensorInfo.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);

/// Utility function used to setup an arm_compute::ITensorInfo object whose dimensions are based on the given
/// armnn::ITensorInfo. This will attempt to reduce the number of leading 1s until the dimension length is equal
/// to the dimensions passed in.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo, unsigned int dimensions);

/// Utility function used to setup an arm_compute::ITensorInfo object whose dimensions are based on the given
/// armnn::ITensorInfo and armnn::DataLayout.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout);
64
Mike Kelly0e3fe102023-01-23 19:32:06 +000065/// Utility function used to setup an arm_compute::ITensorInfo object whose dimensions are based on the given
66/// armnn::ITensorInfo. This will attempt to reduce the number of leading 1s until the dimension length is equal
67/// to the dimensions passed in.
68arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
69 armnn::DataLayout dataLayout, unsigned int dimensions);
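// Illustrative usage (a minimal sketch; the shape and data type below are hypothetical):
//   armnn::TensorInfo nhwcInfo({ 1, 16, 16, 3 }, armnn::DataType::Float32);
//   arm_compute::TensorInfo aclInfo = BuildArmComputeTensorInfo(nhwcInfo, armnn::DataLayout::NHWC);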

/// Utility function used to convert an armnn::DataLayout to the corresponding arm_compute::DataLayout.
arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout);

/// Utility function used to setup an arm_compute::PoolingLayerInfo object from a given
/// armnn::Pooling2dDescriptor and an optional fpMixedPrecision flag.
arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
                                                              bool fpMixedPrecision = false);
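// Illustrative usage (a minimal sketch; the descriptor values are hypothetical):
//   armnn::Pooling2dDescriptor poolDesc;
//   poolDesc.m_PoolType   = armnn::PoolingAlgorithm::Max;
//   poolDesc.m_PoolWidth  = 2;
//   poolDesc.m_PoolHeight = 2;
//   poolDesc.m_StrideX    = 2;
//   poolDesc.m_StrideY    = 2;
//   arm_compute::PoolingLayerInfo poolInfo = BuildArmComputePoolingLayerInfo(poolDesc);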

/// Utility function used to setup an arm_compute::Pooling3dLayerInfo object from a given
/// armnn::Pooling3dDescriptor and an optional fpMixedPrecision flag.
arm_compute::Pooling3dLayerInfo BuildArmComputePooling3dLayerInfo(const Pooling3dDescriptor& descriptor,
                                                                  bool fpMixedPrecision = false);

/// Utility function to setup an arm_compute::NormalizationLayerInfo object from an armnn::NormalizationDescriptor.
arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& desc);

/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector.
/// \param perm PermutationVector used in Arm NN Permute layer
/// \return PermutationVector used in ACL Transpose layer
arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& perm);

/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector.
/// \param perm PermutationVector used in Arm NN Transpose layer
/// \return PermutationVector used in ACL Transpose layer
arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& perm);

/// Utility function used to setup an arm_compute::Size2D object from width and height values.
arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);

/// Gets the appropriate PixelValue for the TensorInfo DataType.
arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float value);

/// Computes the depth multiplier parameter for the Depthwise Conv2d ACL workload.
unsigned int ComputeDepthwiseConv2dDepthMultiplier(armnn::DataLayout layout,
                                                   const arm_compute::TensorShape& weightsShape,
                                                   const arm_compute::TensorShape& inputShape);

/// Utility function used to setup an arm_compute::PadStrideInfo object from an ArmNN layer descriptor.
template <typename Descriptor>
arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor& descriptor)
{
    return arm_compute::PadStrideInfo(descriptor.m_StrideX,
                                      descriptor.m_StrideY,
                                      descriptor.m_PadLeft,
                                      descriptor.m_PadRight,
                                      descriptor.m_PadTop,
                                      descriptor.m_PadBottom,
                                      arm_compute::DimensionRoundingType::FLOOR);
}
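// Illustrative usage (a minimal sketch; any ArmNN descriptor exposing m_StrideX/Y and m_Pad* members works,
// the Convolution2dDescriptor values below are hypothetical):
//   armnn::Convolution2dDescriptor convDesc;
//   convDesc.m_StrideX = 1;
//   convDesc.m_StrideY = 1;
//   convDesc.m_PadLeft = convDesc.m_PadRight = convDesc.m_PadTop = convDesc.m_PadBottom = 1;
//   arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(convDesc);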

/// Utility function used to setup an arm_compute::Padding2D object from an ArmNN layer descriptor.
template <typename Descriptor>
arm_compute::Padding2D BuildArmComputePaddingInfo(const Descriptor& descriptor)
{
    return arm_compute::Padding2D(descriptor.m_PadLeft,
                                  descriptor.m_PadRight,
                                  descriptor.m_PadTop,
                                  descriptor.m_PadBottom);
}

/// Utility function used to setup an arm_compute::CropInfo object from an ArmNN layer descriptor.
template <typename Descriptor>
arm_compute::CropInfo BuildArmComputeCropInfo(const Descriptor& descriptor, const unsigned int rank = 4)
{
    if (rank == 3)
    {
        return arm_compute::CropInfo(0, 0,
                                     descriptor.m_Crops[0].first, descriptor.m_Crops[0].second);
    }
    else if (rank == 4)
    {
        return arm_compute::CropInfo(descriptor.m_Crops[1].first, descriptor.m_Crops[1].second,
                                     descriptor.m_Crops[0].first, descriptor.m_Crops[0].second);
    }
    else
    {
        throw InvalidArgumentException("Tensor rank must be either 3 or 4", CHECK_LOCATION());
    }
}
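// Illustrative usage (a minimal sketch; the crop values are hypothetical and follow the rank-4 branch above,
// where m_Crops[0] holds the crops for the first spatial dimension and m_Crops[1] those for the second):
//   armnn::BatchToSpaceNdDescriptor batchToSpaceDesc;
//   batchToSpaceDesc.m_Crops = { { 0, 0 }, { 1, 1 } };
//   arm_compute::CropInfo cropInfo = BuildArmComputeCropInfo(batchToSpaceDesc, 4);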

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
}

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo, DataLayout dataLayout)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
}
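// Illustrative usage (a minimal sketch; arm_compute::Tensor is the CPU tensor type from
// arm_compute/runtime/Tensor.h, CL backends would use arm_compute::CLTensor instead):
//   arm_compute::Tensor aclTensor;
//   BuildArmComputeTensor(aclTensor, armnn::TensorInfo({ 2, 3 }, armnn::DataType::Float32));
//   InitialiseArmComputeTensorEmpty(aclTensor);   // allocate the backing memory (see below)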

template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    tensor.allocator()->allocate();
}

/// Utility function to free unused tensors after a workload is configured and prepared
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    if (tensor && !tensor->is_used())
    {
        tensor.reset(nullptr);
    }
}
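// Illustrative usage (a minimal sketch; m_KernelTensor is a hypothetical workload member holding weights
// that are no longer needed once the ACL function has been prepared):
//   std::unique_ptr<arm_compute::Tensor> m_KernelTensor;
//   ...
//   FreeTensorIfUnused(m_KernelTensor);   // frees the tensor only if ACL no longer marks it as used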

// Helper function to obtain byte offset into tensor data
inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info,
                              uint32_t depthIndex,
                              uint32_t batchIndex,
                              uint32_t channelIndex,
                              uint32_t y,
                              uint32_t x)
{
    arm_compute::Coordinates coords;
    coords.set(4, static_cast<int>(depthIndex));
    coords.set(3, static_cast<int>(batchIndex));
    coords.set(2, static_cast<int>(channelIndex));
    coords.set(1, static_cast<int>(y));
    coords.set(0, static_cast<int>(x));
    return armnn::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
}

// Helper function to obtain element offset into data buffer representing tensor data (assuming no strides).
inline size_t GetLinearBufferOffset(const arm_compute::ITensorInfo& info,
                                    uint32_t depthIndex,
                                    uint32_t batchIndex,
                                    uint32_t channelIndex,
                                    uint32_t y,
                                    uint32_t x)
{
    const arm_compute::TensorShape& shape = info.tensor_shape();
    uint32_t width = static_cast<uint32_t>(shape[0]);
    uint32_t height = static_cast<uint32_t>(shape[1]);
    uint32_t numChannels = static_cast<uint32_t>(shape[2]);
    uint32_t numBatches = static_cast<uint32_t>(shape[3]);
    return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
}
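// Worked example (a sketch, assuming a dense tensor with W=4, H=3, C=2, N=1, D=1): the element at
// x=3, y=2, channelIndex=1, batchIndex=0, depthIndex=0 has linear offset
// (((0*1 + 0)*2 + 1)*3 + 2)*4 + 3 = 23 elements from the start of the buffer. GetTensorOffset above
// computes the byte offset instead and also accounts for any padding/strides in the ACL layout.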

template <typename T>
void CopyArmComputeITensorData(const arm_compute::ITensor& srcTensor, T* dstData)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *srcTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        const uint8_t* const bufferPtr = srcTensor.buffer();
        uint32_t width = static_cast<uint32_t>(shape[0]);
        uint32_t height = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches = static_cast<uint32_t>(shape[3]);
        uint32_t depth = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from arm_compute tensor buffer to linear memory buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(
                            dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            width * sizeof(T));
                    }
                }
            }
        }
    }
}
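// Illustrative usage (a minimal sketch; assumes srcTensor is an allocated float32 ACL tensor):
//   std::vector<float> output(srcTensor.info()->tensor_shape().total_size());
//   CopyArmComputeITensorData(srcTensor, output.data());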

template <typename T>
void CopyArmComputeITensorData(const T* srcData, arm_compute::ITensor& dstTensor)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *dstTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        uint8_t* const bufferPtr = dstTensor.buffer();
        uint32_t width = static_cast<uint32_t>(shape[0]);
        uint32_t height = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches = static_cast<uint32_t>(shape[3]);
        uint32_t depth = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from linear memory buffer to arm_compute tensor buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(
                            bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            width * sizeof(T));
                    }
                }
            }
        }
    }
}

/// Construct a TensorShape object from an ArmCompute object based on arm_compute::Dimensions.
/// \tparam ArmComputeType Any type that implements the Dimensions interface
/// \tparam T Shape value type
/// \param shapelike An ArmCompute object that implements the Dimensions interface
/// \param initial A default value to initialise the shape with
/// \return A TensorShape object filled from the ACL shapelike object.
template<typename ArmComputeType, typename T>
TensorShape GetTensorShape(const ArmComputeType& shapelike, T initial)
{
    std::vector<unsigned int> s(MaxNumOfTensorDimensions, initial);
    for (unsigned int i=0; i < shapelike.num_dimensions(); ++i)
    {
        s[(shapelike.num_dimensions()-1)-i] = armnn::numeric_cast<unsigned int>(shapelike[i]);
    }
    return TensorShape(armnn::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
}
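// Illustrative example (a sketch of the expected result): ACL dimension 0 is the innermost,
// fastest-changing dimension, so an arm_compute::TensorShape of (4, 3, 2) is expected to
// become the Arm NN TensorShape { 2, 3, 4 }:
//   armnn::TensorShape armnnShape = GetTensorShape(arm_compute::TensorShape(4, 3, 2), 1U);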

/// Get the strides from an ACL strides object
inline TensorShape GetStrides(const arm_compute::Strides& strides)
{
    return GetTensorShape(strides, 0U);
}

/// Get the shape from an ACL shape object
inline TensorShape GetShape(const arm_compute::TensorShape& shape)
{
    return GetTensorShape(shape, 1U);
}

} // namespace armcomputetensorutils
} // namespace armnn