//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/DescriptorsFwd.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>

#include <Half.hpp>

// Standard library facilities used directly in this header (std::vector, std::unique_ptr, memcpy, fixed-width ints).
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

namespace armnn
{
class ITensorHandle;

namespace armcomputetensorutils
{

/// Utility function to map an armnn::DataType to the corresponding arm_compute::DataType.
arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales);

/// Utility function used to set up an arm_compute::Coordinates object from a vector of ArmNN axes for reduction functions.
arm_compute::Coordinates BuildArmComputeReductionCoordinates(size_t inputDimensions,
                                                             unsigned int originalInputRank,
                                                             const std::vector<unsigned int>& armnnAxes);

/// Utility function used to setup an arm_compute::TensorShape object from an armnn::TensorShape.
arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape);

/// Utility function used to setup an arm_compute::TensorInfo object whose dimensions are based on the given
/// armnn::TensorInfo.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);

/// Utility function used to setup an arm_compute::TensorInfo object whose dimensions are based on the given
/// armnn::TensorInfo and armnn::DataLayout.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout);

/// Utility function used to convert an armnn::DataLayout to the corresponding arm_compute::DataLayout.
arm_compute::DataLayout ConvertDataLayout(armnn::DataLayout dataLayout);

/// Utility function used to setup an arm_compute::PoolingLayerInfo object from the given
/// armnn::Pooling2dDescriptor and the fpMixedPrecision flag.
arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor,
                                                              bool fpMixedPrecision = false);

/// Utility function to setup an arm_compute::NormalizationLayerInfo object from an armnn::NormalizationDescriptor.
arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& desc);

/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector.
arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector);

/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector for transposition.
arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::PermutationVector& vector);

/// Utility function used to setup an arm_compute::Size2D object from width and height values.
arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);

/// Gets the appropriate PixelValue for the DataType of the given TensorInfo.
arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue);

/// Utility function used to setup an arm_compute::PadStrideInfo object from an armnn layer descriptor.
template <typename Descriptor>
arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor& descriptor)
{
    return arm_compute::PadStrideInfo(descriptor.m_StrideX,
                                      descriptor.m_StrideY,
                                      descriptor.m_PadLeft,
                                      descriptor.m_PadRight,
                                      descriptor.m_PadTop,
                                      descriptor.m_PadBottom,
                                      arm_compute::DimensionRoundingType::FLOOR);
}
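
// Example (illustrative sketch): building a PadStrideInfo from an armnn::Convolution2dDescriptor.
// Any descriptor exposing the m_Stride*/m_Pad* members used above would work the same way.
//
//     armnn::Convolution2dDescriptor convDesc;
//     convDesc.m_StrideX = 2; convDesc.m_StrideY   = 2;
//     convDesc.m_PadLeft = 1; convDesc.m_PadRight  = 1;
//     convDesc.m_PadTop  = 1; convDesc.m_PadBottom = 1;
//     arm_compute::PadStrideInfo psi = BuildArmComputePadStrideInfo(convDesc);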

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
}

/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor and data layout.
template <typename Tensor>
void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo, DataLayout dataLayout)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
}

/// Allocates memory for the given ArmCompute tensor based on its current tensor info.
template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    tensor.allocator()->allocate();
}
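
// Example (illustrative sketch, assuming a NEON arm_compute::Tensor; a CL tensor follows the
// same sequence): tensor metadata is set up first, and backing memory is allocated afterwards.
//
//     arm_compute::Tensor aclTensor;
//     armnn::TensorInfo info(armnn::TensorShape({1, 3, 224, 224}), armnn::DataType::Float32);
//     BuildArmComputeTensor(aclTensor, info);     // sets dimensions/type only
//     InitialiseArmComputeTensorEmpty(aclTensor); // allocates the backing memory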

/// Utility function to free unused tensors after a workload is configured and prepared
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    if (tensor && !tensor->is_used())
    {
        tensor.reset(nullptr);
    }
}
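
// Example (illustrative sketch): once an ACL function has been configured and prepared, tensors
// that were only needed for configuration can be released. The m_KernelTensor member below is a
// hypothetical name used purely for illustration.
//
//     std::unique_ptr<arm_compute::Tensor> m_KernelTensor = /* ... */;
//     // ... configure() and prepare() the ACL function ...
//     FreeTensorIfUnused(m_KernelTensor); // resets the pointer only if ACL no longer uses it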

// Helper function to obtain byte offset into tensor data
inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info,
                              uint32_t depthIndex,
                              uint32_t batchIndex,
                              uint32_t channelIndex,
                              uint32_t y,
                              uint32_t x)
{
    arm_compute::Coordinates coords;
    coords.set(4, static_cast<int>(depthIndex));
    coords.set(3, static_cast<int>(batchIndex));
    coords.set(2, static_cast<int>(channelIndex));
    coords.set(1, static_cast<int>(y));
    coords.set(0, static_cast<int>(x));
    return armnn::numeric_cast<size_t>(info.offset_element_in_bytes(coords));
}

// Helper function to obtain element offset into data buffer representing tensor data (assuming no strides).
inline size_t GetLinearBufferOffset(const arm_compute::ITensorInfo& info,
                                    uint32_t depthIndex,
                                    uint32_t batchIndex,
                                    uint32_t channelIndex,
                                    uint32_t y,
                                    uint32_t x)
{
    const arm_compute::TensorShape& shape = info.tensor_shape();
    uint32_t width       = static_cast<uint32_t>(shape[0]);
    uint32_t height      = static_cast<uint32_t>(shape[1]);
    uint32_t numChannels = static_cast<uint32_t>(shape[2]);
    uint32_t numBatches  = static_cast<uint32_t>(shape[3]);
    return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
}
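
// Worked example: for a tensor with shape [W=8, H=4, C=2, N=1, D=1] and float elements, the
// element at (depth=0, batch=0, channel=1, y=2, x=3) has
//     GetLinearBufferOffset = (((0*1 + 0)*2 + 1)*4 + 2)*8 + 3 = 51 elements,
// while GetTensorOffset returns the byte offset ACL reports for the same coordinates, which also
// accounts for any padding/strides ACL added (51 * 4 = 204 bytes only when the buffer is dense).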

template <typename T>
void CopyArmComputeITensorData(const arm_compute::ITensor& srcTensor, T* dstData)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *srcTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        const uint8_t* const bufferPtr = srcTensor.buffer();
        uint32_t width       = static_cast<uint32_t>(shape[0]);
        uint32_t height      = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches  = static_cast<uint32_t>(shape[3]);
        uint32_t depth       = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from arm_compute tensor buffer to linear memory buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(
                            dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            width * sizeof(T));
                    }
                }
            }
        }
    }
}
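
// Example (illustrative sketch, assuming the ACL tensor holds float data and has already been
// allocated and filled): copying its contents into a flat, dense ArmNN-side buffer.
//
//     const arm_compute::ITensor& aclTensor = /* ... */;
//     std::vector<float> output(aclTensor.info()->tensor_shape().total_size());
//     CopyArmComputeITensorData(aclTensor, output.data());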

template <typename T>
void CopyArmComputeITensorData(const T* srcData, arm_compute::ITensor& dstTensor)
{
    // If MaxNumOfTensorDimensions is increased, this loop will need fixing.
    static_assert(MaxNumOfTensorDimensions == 5, "Please update CopyArmComputeITensorData");
    {
        const arm_compute::ITensorInfo& info = *dstTensor.info();
        const arm_compute::TensorShape& shape = info.tensor_shape();
        uint8_t* const bufferPtr = dstTensor.buffer();
        uint32_t width       = static_cast<uint32_t>(shape[0]);
        uint32_t height      = static_cast<uint32_t>(shape[1]);
        uint32_t numChannels = static_cast<uint32_t>(shape[2]);
        uint32_t numBatches  = static_cast<uint32_t>(shape[3]);
        uint32_t depth       = static_cast<uint32_t>(shape[4]);

        for (unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
        {
            for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
            {
                for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
                {
                    for (unsigned int y = 0; y < height; ++y)
                    {
                        // Copies one row from linear memory buffer to arm_compute tensor buffer.
                        // A row is the largest contiguous region we can copy, as the tensor data may be using strides.
                        memcpy(
                            bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
                            width * sizeof(T));
                    }
                }
            }
        }
    }
}

/// Construct a TensorShape object from an ArmCompute object based on arm_compute::Dimensions.
/// \tparam ArmComputeType Any type that implements the Dimensions interface.
/// \tparam T Shape value type.
/// \param shapelike An ArmCompute object that implements the Dimensions interface.
/// \param initial A default value to initialise the shape with.
/// \return A TensorShape object filled from the ACL shapelike object.
template<typename ArmComputeType, typename T>
TensorShape GetTensorShape(const ArmComputeType& shapelike, T initial)
{
    std::vector<unsigned int> s(MaxNumOfTensorDimensions, initial);
    for (unsigned int i = 0; i < shapelike.num_dimensions(); ++i)
    {
        s[(shapelike.num_dimensions() - 1) - i] = armnn::numeric_cast<unsigned int>(shapelike[i]);
    }
    return TensorShape(armnn::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
}

/// Get the strides from an ACL strides object.
inline TensorShape GetStrides(const arm_compute::Strides& strides)
{
    return GetTensorShape(strides, 0U);
}

/// Get the shape from an ACL shape object.
inline TensorShape GetShape(const arm_compute::TensorShape& shape)
{
    return GetTensorShape(shape, 1U);
}
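
// Example (illustrative sketch): ACL stores dimensions innermost first (index 0 is the width/X
// dimension), so GetTensorShape, and therefore GetShape/GetStrides, reverse the order to match
// ArmNN's outermost-first convention.
//
//     arm_compute::TensorShape aclShape(8U, 4U, 2U);       // W=8, H=4, C=2
//     armnn::TensorShape armnnShape = GetShape(aclShape);  // yields { 2, 4, 8 }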

} // namespace armcomputetensorutils
} // namespace armnn