blob: ba6ef6a3fe9a29f448e3a84ea6de315f2776e28f [file] [log] [blame]
Laurent Carlier749294b2020-06-01 09:03:17 +01001//
Teresa Charlinec5f7d12021-10-22 17:15:00 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
7#include <armnn/Tensor.hpp>
8#include <armnn/DescriptorsFwd.hpp>
9
Matthew Sloyan171214c2020-09-09 09:07:37 +010010#include <armnn/utility/NumericCast.hpp>
11
telsoa014fcda012018-03-09 14:13:49 +000012#include <arm_compute/core/ITensor.h>
13#include <arm_compute/core/TensorInfo.h>
surmeh013537c2c2018-05-18 16:31:43 +010014#include <arm_compute/core/Types.h>
telsoa014fcda012018-03-09 14:13:49 +000015
Mike Kelly0a08ec62019-07-25 08:39:31 +010016#include <Half.hpp>
17
telsoa014fcda012018-03-09 14:13:49 +000018namespace armnn
19{
20class ITensorHandle;
21
22namespace armcomputetensorutils
23{
24
telsoa01c577f2c2018-08-31 09:22:23 +010025/// Utility function to map an armnn::DataType to corresponding arm_compute::DataType.
Derek Lambertid466a542020-01-22 15:37:29 +000026arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales);
telsoa014fcda012018-03-09 14:13:49 +000027
Cathal Corbettfd5bec42022-03-03 15:13:23 +000028/// Utility function to map an arm_compute::DataType to corresponding armnn::DataType.
29armnn::DataType GetArmNNDataType(arm_compute::DataType datatype);
30
Matthew Benthamfd899962018-12-31 15:49:42 +000031/// Utility function used to set up an arm_compute::Coordinates from a vector of ArmNN Axes for reduction functions
32arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
33 unsigned int originalInputRank,
34 const std::vector<unsigned int>& armnnAxes);
35
telsoa01c577f2c2018-08-31 09:22:23 +010036/// Utility function used to setup an arm_compute::TensorShape object from an armnn::TensorShape.
telsoa014fcda012018-03-09 14:13:49 +000037arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape);
38
39/// Utility function used to setup an arm_compute::ITensorInfo object whose dimensions are based on the given
telsoa01c577f2c2018-08-31 09:22:23 +010040/// armnn::ITensorInfo.
telsoa014fcda012018-03-09 14:13:49 +000041arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);
42
/// Utility function used to setup an arm_compute::TensorInfo object whose dimensions and data layout
/// are based on the given armnn::TensorInfo and armnn::DataLayout.
46arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
47 armnn::DataLayout dataLayout);
48
/// Utility function used to convert armnn::DataLayout to arm_compute::DataLayout.
51arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout);
52
/// Utility function used to setup an arm_compute::PoolingLayerInfo object from a given
/// armnn::Pooling2dDescriptor and an fpMixedPrecision flag.
56arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
57 bool fpMixedPrecision = false);
telsoa014fcda012018-03-09 14:13:49 +000058
telsoa01c577f2c2018-08-31 09:22:23 +010059/// Utility function to setup an arm_compute::NormalizationLayerInfo object from an armnn::NormalizationDescriptor.
telsoa014fcda012018-03-09 14:13:49 +000060arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& desc);
61
telsoa01c577f2c2018-08-31 09:22:23 +010062/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector.
telsoa014fcda012018-03-09 14:13:49 +000063arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector);
64
Mike Kellyc9ea45a2020-02-28 18:11:58 +000065/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector.
66arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& vector);
67
Sadik Armaganf4464322018-12-20 16:19:12 +000068/// Utility function used to setup an arm_compute::Size2D object from width and height values.
69arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);
70
Matthew Sloyan2e5d0b22021-10-21 14:05:31 +010071/// Gets the appropriate PixelValue for the TensorInfo DataType
72arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue);
Mike Kelly0a08ec62019-07-25 08:39:31 +010073
telsoa01c577f2c2018-08-31 09:22:23 +010074/// Utility function used to setup an arm_compute::PadStrideInfo object from an armnn layer descriptor.
surmeh013537c2c2018-05-18 16:31:43 +010075template <typename Descriptor>
76arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor &descriptor)
77{
78 return arm_compute::PadStrideInfo(descriptor.m_StrideX,
79 descriptor.m_StrideY,
80 descriptor.m_PadLeft,
81 descriptor.m_PadRight,
82 descriptor.m_PadTop,
83 descriptor.m_PadBottom,
84 arm_compute::DimensionRoundingType::FLOOR);
85}
86
telsoa014fcda012018-03-09 14:13:49 +000087/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
88template <typename Tensor>
89void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo)
90{
91 tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
92}
93
Francis Murtagh351d13d2018-09-24 15:01:18 +010094/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
95template <typename Tensor>
96void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo, DataLayout dataLayout)
97{
98 tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
99}
100
/// Allocates the tensor's backing memory via its allocator, without filling it.
template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    auto* allocator = tensor.allocator();
    allocator->allocate();
}
106
/// Utility function to free unused tensors after a workload is configured and prepared.
/// Releases the tensor only when it exists and reports itself as no longer used.
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    // Short-circuit keeps is_used() from being called on a null tensor.
    const bool releasable = (tensor != nullptr) && !tensor->is_used();
    if (releasable)
    {
        tensor.reset(nullptr);
    }
}
116
telsoa014fcda012018-03-09 14:13:49 +0000117// Helper function to obtain byte offset into tensor data
118inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info,
Matthew Jacksondba634f2019-08-15 15:14:18 +0100119 uint32_t depthIndex,
telsoa014fcda012018-03-09 14:13:49 +0000120 uint32_t batchIndex,
121 uint32_t channelIndex,
122 uint32_t y,
123 uint32_t x)
124{
125 arm_compute::Coordinates coords;
Matthew Jacksondba634f2019-08-15 15:14:18 +0100126 coords.set(4, static_cast<int>(depthIndex));
telsoa01c577f2c2018-08-31 09:22:23 +0100127 coords.set(3, static_cast<int>(batchIndex));
128 coords.set(2, static_cast<int>(channelIndex));
129 coords.set(1, static_cast<int>(y));
130 coords.set(0, static_cast<int>(x));
Matthew Sloyan171214c2020-09-09 09:07:37 +0100131 return armnn::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
telsoa014fcda012018-03-09 14:13:49 +0000132}
133
telsoa01c577f2c2018-08-31 09:22:23 +0100134// Helper function to obtain element offset into data buffer representing tensor data (assuming no strides).
telsoa014fcda012018-03-09 14:13:49 +0000135inline size_t GetLinearBufferOffset(const arm_compute::ITensorInfo& info,
Matthew Jacksondba634f2019-08-15 15:14:18 +0100136 uint32_t depthIndex,
telsoa014fcda012018-03-09 14:13:49 +0000137 uint32_t batchIndex,
138 uint32_t channelIndex,
139 uint32_t y,
140 uint32_t x)
141{
142 const arm_compute::TensorShape& shape = info.tensor_shape();
telsoa01c577f2c2018-08-31 09:22:23 +0100143 uint32_t width = static_cast<uint32_t>(shape[0]);
144 uint32_t height = static_cast<uint32_t>(shape[1]);
145 uint32_t numChannels = static_cast<uint32_t>(shape[2]);
Matthew Jacksondba634f2019-08-15 15:14:18 +0100146 uint32_t numBatches = static_cast<uint32_t>(shape[3]);
147 return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
telsoa014fcda012018-03-09 14:13:49 +0000148}
149
150template <typename T>
151void CopyArmComputeITensorData(const arm_compute::ITensor& srcTensor, T* dstData)
152{
telsoa01c577f2c2018-08-31 09:22:23 +0100153 // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
Matthew Jacksondba634f2019-08-15 15:14:18 +0100154 static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
telsoa014fcda012018-03-09 14:13:49 +0000155 {
156 const arm_compute::ITensorInfo& info = *srcTensor.info();
157 const arm_compute::TensorShape& shape = info.tensor_shape();
158 const uint8_t* const bufferPtr = srcTensor.buffer();
telsoa01c577f2c2018-08-31 09:22:23 +0100159 uint32_t width = static_cast<uint32_t>(shape[0]);
160 uint32_t height = static_cast<uint32_t>(shape[1]);
161 uint32_t numChannels = static_cast<uint32_t>(shape[2]);
162 uint32_t numBatches = static_cast<uint32_t>(shape[3]);
Matthew Jacksondba634f2019-08-15 15:14:18 +0100163 uint32_t depth = static_cast<uint32_t>(shape[4]);
telsoa014fcda012018-03-09 14:13:49 +0000164
Matthew Jacksondba634f2019-08-15 15:14:18 +0100165 for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
telsoa014fcda012018-03-09 14:13:49 +0000166 {
Matthew Jacksondba634f2019-08-15 15:14:18 +0100167 for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
telsoa014fcda012018-03-09 14:13:49 +0000168 {
Matthew Jacksondba634f2019-08-15 15:14:18 +0100169 for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
telsoa014fcda012018-03-09 14:13:49 +0000170 {
Matthew Jacksondba634f2019-08-15 15:14:18 +0100171 for (unsigned int y = 0; y < height; ++y)
172 {
173 // Copies one row from arm_compute tensor buffer to linear memory buffer.
174 // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
175 memcpy(
176 dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
177 bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
178 width * sizeof(T));
179 }
telsoa014fcda012018-03-09 14:13:49 +0000180 }
181 }
182 }
183 }
184}
185
186template <typename T>
187void CopyArmComputeITensorData(const T* srcData, arm_compute::ITensor& dstTensor)
188{
telsoa01c577f2c2018-08-31 09:22:23 +0100189 // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
Matthew Jacksondba634f2019-08-15 15:14:18 +0100190 static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
telsoa014fcda012018-03-09 14:13:49 +0000191 {
192 const arm_compute::ITensorInfo& info = *dstTensor.info();
193 const arm_compute::TensorShape& shape = info.tensor_shape();
194 uint8_t* const bufferPtr = dstTensor.buffer();
telsoa01c577f2c2018-08-31 09:22:23 +0100195 uint32_t width = static_cast<uint32_t>(shape[0]);
196 uint32_t height = static_cast<uint32_t>(shape[1]);
197 uint32_t numChannels = static_cast<uint32_t>(shape[2]);
198 uint32_t numBatches = static_cast<uint32_t>(shape[3]);
Matthew Jacksondba634f2019-08-15 15:14:18 +0100199 uint32_t depth = static_cast<uint32_t>(shape[4]);
telsoa014fcda012018-03-09 14:13:49 +0000200
Matthew Jacksondba634f2019-08-15 15:14:18 +0100201 for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
telsoa014fcda012018-03-09 14:13:49 +0000202 {
Matthew Jacksondba634f2019-08-15 15:14:18 +0100203 for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
telsoa014fcda012018-03-09 14:13:49 +0000204 {
Matthew Jacksondba634f2019-08-15 15:14:18 +0100205 for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
telsoa014fcda012018-03-09 14:13:49 +0000206 {
Matthew Jacksondba634f2019-08-15 15:14:18 +0100207 for (unsigned int y = 0; y < height; ++y)
208 {
209 // Copies one row from linear memory buffer to arm_compute tensor buffer.
210 // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
211 memcpy(
212 bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
213 srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
214 width * sizeof(T));
215 }
telsoa014fcda012018-03-09 14:13:49 +0000216 }
217 }
218 }
219 }
220}
221
telsoa01c577f2c2018-08-31 09:22:23 +0100222/// Construct a TensorShape object from an ArmCompute object based on arm_compute::Dimensions.
223/// \tparam ArmComputeType Any type that implements the Dimensions interface
224/// \tparam T Shape value type
225/// \param shapelike An ArmCompute object that implements the Dimensions interface
226/// \param initial A default value to initialise the shape with
227/// \return A TensorShape object filled from the Acl shapelike object.
228template<typename ArmComputeType, typename T>
229TensorShape GetTensorShape(const ArmComputeType& shapelike, T initial)
230{
231 std::vector<unsigned int> s(MaxNumOfTensorDimensions, initial);
232 for (unsigned int i=0; i < shapelike.num_dimensions(); ++i)
233 {
Matthew Sloyan171214c2020-09-09 09:07:37 +0100234 s[(shapelike.num_dimensions()-1)-i] = armnn::numeric_cast<unsigned int>(shapelike[i]);
telsoa01c577f2c2018-08-31 09:22:23 +0100235 }
Matthew Sloyan171214c2020-09-09 09:07:37 +0100236 return TensorShape(armnn::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
telsoa01c577f2c2018-08-31 09:22:23 +0100237};
238
239/// Get the strides from an ACL strides object
240inline TensorShape GetStrides(const arm_compute::Strides& strides)
241{
242 return GetTensorShape(strides, 0U);
243}
244
245/// Get the shape from an ACL shape object
246inline TensorShape GetShape(const arm_compute::TensorShape& shape)
247{
248 return GetTensorShape(shape, 1U);
249}
250
telsoa014fcda012018-03-09 14:13:49 +0000251} // namespace armcomputetensorutils
252} // namespace armnn