IVGCVSW-4209 Create a public API for the ArmNN Utils
* Moved the relevant armnnUtils headers to the new location:
include/armnnUtils
* Updated the header usage throughout the source code
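* For reference, a minimal usage sketch of the relocated headers (the
  utilities named here are illustrative examples only, not an exhaustive
  list of what moved):

      // Old include style, resolved via src/armnnUtils on the include path:
      //   #include <TensorUtils.hpp>
      //   #include <DataLayoutIndexed.hpp>
      // New include style, via the public include/armnnUtils directory:
      #include <armnnUtils/TensorUtils.hpp>
      #include <armnnUtils/DataLayoutIndexed.hpp>

      // The utilities keep their existing armnnUtils namespace, so call
      // sites such as "using namespace armnnUtils;" are unchanged.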
!android-nn-driver:2387
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Change-Id: I2ba15cebcacafad2b5a1a7b9c3312ffc585e09d6
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index 2687a4e..76616f1 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -5,7 +5,7 @@
#include "ArgMinMax.hpp"
-#include <TensorUtils.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <boost/numeric/conversion/cast.hpp>
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 95a31fb..ca5110c 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,9 +5,10 @@
#pragma once
-#include "FloatingPointConverter.hpp"
-
#include <armnn/ArmNN.hpp>
+
+#include <armnnUtils/FloatingPointConverter.hpp>
+
#include <ResolveType.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/BatchNormImpl.cpp b/src/backends/reference/workloads/BatchNormImpl.cpp
index b80af8c..e742c72 100644
--- a/src/backends/reference/workloads/BatchNormImpl.cpp
+++ b/src/backends/reference/workloads/BatchNormImpl.cpp
@@ -8,7 +8,7 @@
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
index b757d37..a375aaa 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.hpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -5,15 +5,16 @@
#pragma once
-#include <armnn/Types.hpp>
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
#include "BaseIterator.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnn/Types.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
namespace armnn
{
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 7dba760..562fd3e 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -13,13 +13,11 @@
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <DataLayoutIndexed.hpp>
-
#include <cmath>
#include <limits>
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index dcd498c..b9cd7f9 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -6,8 +6,9 @@
#pragma once
#include "BaseIterator.hpp"
-#include "FloatingPointConverter.hpp"
-#include "TensorUtils.hpp"
+
+#include <armnnUtils/FloatingPointConverter.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index d500e9b..91ca160 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -5,8 +5,8 @@
#include "DepthToSpace.hpp"
-#include <DataLayoutIndexed.hpp>
-#include <Permute.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnnUtils/Permute.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 5c0cffa..0d578d6 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -6,7 +6,8 @@
#pragma once
#include "BaseIterator.hpp"
-#include "TensorUtils.hpp"
+
+#include <armnnUtils/TensorUtils.hpp>
#include <boost/assert.hpp>
diff --git a/src/backends/reference/workloads/InstanceNorm.cpp b/src/backends/reference/workloads/InstanceNorm.cpp
index 9d6532f..08c555f 100644
--- a/src/backends/reference/workloads/InstanceNorm.cpp
+++ b/src/backends/reference/workloads/InstanceNorm.cpp
@@ -8,7 +8,7 @@
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 3fa3dc0..ddf5674 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -5,7 +5,7 @@
#include "LogSoftmax.hpp"
-#include <TensorUtils.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index cf83f8c..ea8f4ee 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -4,11 +4,12 @@
//
#include "Pooling2d.hpp"
-#include "DataLayoutIndexed.hpp"
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <limits>
diff --git a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
index 886e77a..ef813eb 100644
--- a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
@@ -4,9 +4,9 @@
//
#include "RefConvertFp16ToFp32Workload.hpp"
-
#include "RefWorkloadUtils.hpp"
-#include "FloatingPointConverter.hpp"
+
+#include <armnnUtils/FloatingPointConverter.hpp>
#include <Half.hpp>
diff --git a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
index 33270ad..559901f 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
@@ -4,12 +4,12 @@
//
#include "RefConvertFp32ToFp16Workload.hpp"
-
-#include "FloatingPointConverter.hpp"
#include "RefWorkloadUtils.hpp"
#include "Profiling.hpp"
-#include "Half.hpp"
+#include <armnnUtils/FloatingPointConverter.hpp>
+
+#include <Half.hpp>
namespace armnn
{
diff --git a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
index 3764b9a..6fec1ab 100644
--- a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
@@ -4,13 +4,13 @@
//
#include "RefL2NormalizationWorkload.hpp"
-
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
-#include "DataLayoutIndexed.hpp"
-#include "Profiling.hpp"
+#include <Profiling.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -21,80 +21,80 @@
namespace armnn
{
RefL2NormalizationWorkload::RefL2NormalizationWorkload(
- const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : BaseWorkload<L2NormalizationQueueDescriptor>(descriptor, info) {}
+ const L2NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<L2NormalizationQueueDescriptor>(descriptor, info) {}
- void RefL2NormalizationWorkload::Execute() const
+void RefL2NormalizationWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");
+
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ auto inputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+ auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+
+ DataLayoutIndexed dataLayout(m_Data.m_Parameters.m_DataLayout);
+
+ const TensorShape& shape = inputInfo.GetShape();
+ unsigned int paddedShapeArray[4];
+ const int idxShift = 4 - boost::numeric_cast<int>(shape.GetNumDimensions());
+
+ const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
+ paddedShapeArray[0] = batches;
+
+ const int channelsIdx = boost::numeric_cast<int>(dataLayout.GetChannelsIndex());
+ const unsigned int channels = (channelsIdx - idxShift >= 0)
+ ? shape[boost::numeric_cast<unsigned int>(channelsIdx - idxShift)]
+ : 1;
+ paddedShapeArray[channelsIdx] = channels;
+
+ const int heightIdx = boost::numeric_cast<int>(dataLayout.GetHeightIndex());
+ const unsigned int height = (heightIdx - idxShift >= 0)
+ ? shape[boost::numeric_cast<unsigned int>(heightIdx - idxShift)]
+ : 1;
+ paddedShapeArray[heightIdx] = height;
+
+ const int widthIdx = boost::numeric_cast<int>(dataLayout.GetWidthIndex());
+ const unsigned int width = (widthIdx - idxShift >= 0)
+ ? shape[boost::numeric_cast<unsigned int>(widthIdx - idxShift)]
+ : 1;
+ paddedShapeArray[widthIdx] = width;
+
+ const TensorShape& paddedShape = TensorShape(4, paddedShapeArray);
+
+ for (unsigned int n = 0; n < batches; ++n)
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");
-
- const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
-
- auto inputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
- auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
-
- DataLayoutIndexed dataLayout(m_Data.m_Parameters.m_DataLayout);
-
- const TensorShape& shape = inputInfo.GetShape();
- unsigned int paddedShapeArray[4];
- const int idxShift = 4 - boost::numeric_cast<int>(shape.GetNumDimensions());
-
- const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
- paddedShapeArray[0] = batches;
-
- const int channelsIdx = boost::numeric_cast<int>(dataLayout.GetChannelsIndex());
- const unsigned int channels = (channelsIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(channelsIdx - idxShift)]
- : 1;
- paddedShapeArray[channelsIdx] = channels;
-
- const int heightIdx = boost::numeric_cast<int>(dataLayout.GetHeightIndex());
- const unsigned int height = (heightIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(heightIdx - idxShift)]
- : 1;
- paddedShapeArray[heightIdx] = height;
-
- const int widthIdx = boost::numeric_cast<int>(dataLayout.GetWidthIndex());
- const unsigned int width = (widthIdx - idxShift >= 0)
- ? shape[boost::numeric_cast<unsigned int>(widthIdx - idxShift)]
- : 1;
- paddedShapeArray[widthIdx] = width;
-
- const TensorShape& paddedShape = TensorShape(4, paddedShapeArray);
-
- for (unsigned int n = 0; n < batches; ++n)
+ for (unsigned int c = 0; c < channels; ++c)
{
- for (unsigned int c = 0; c < channels; ++c)
+ for (unsigned int h = 0; h < height; ++h)
{
- for (unsigned int h = 0; h < height; ++h)
+ for (unsigned int w = 0; w < width; ++w)
{
- for (unsigned int w = 0; w < width; ++w)
+ float reduction = 0.0;
+ for (unsigned int d = 0; d < channels; ++d)
{
- float reduction = 0.0;
- for (unsigned int d = 0; d < channels; ++d)
- {
- unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);
+ unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);
- (*inputDecoder)[inputIndex];
- const float value = inputDecoder->Get();
- reduction += value * value;
- }
-
- unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);
-
- float maximum = reduction < m_Data.m_Parameters.m_Eps ? m_Data.m_Parameters.m_Eps : reduction;
-
- const float scale = 1.0f / sqrtf(maximum);
-
- (*inputDecoder)[index];
- (*outputEncoder)[index];
- outputEncoder->Set(inputDecoder->Get() * scale);
+ (*inputDecoder)[inputIndex];
+ const float value = inputDecoder->Get();
+ reduction += value * value;
}
+
+ unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);
+
+ float maximum = reduction < m_Data.m_Parameters.m_Eps ? m_Data.m_Parameters.m_Eps : reduction;
+
+ const float scale = 1.0f / sqrtf(maximum);
+
+ (*inputDecoder)[index];
+ (*outputEncoder)[index];
+ outputEncoder->Set(inputDecoder->Get() * scale);
}
}
}
}
+}
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index 8ff2d9c..0427baf 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -4,14 +4,14 @@
//
#include "RefNormalizationWorkload.hpp"
-
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
#include <Profiling.hpp>
#include <boost/log/trivial.hpp>
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index 4d43b7e..4e7b76b 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -6,7 +6,8 @@
#include "RefPermuteWorkload.hpp"
#include "RefWorkloadUtils.hpp"
-#include <Permute.hpp>
+#include <armnnUtils/Permute.hpp>
+
#include <ResolveType.hpp>
namespace armnn
diff --git a/src/backends/reference/workloads/Resize.hpp b/src/backends/reference/workloads/Resize.hpp
index 8bd8999..4c35794 100644
--- a/src/backends/reference/workloads/Resize.hpp
+++ b/src/backends/reference/workloads/Resize.hpp
@@ -6,9 +6,10 @@
#pragma once
#include "BaseIterator.hpp"
+
#include <armnn/Tensor.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
namespace armnn
{
@@ -20,4 +21,4 @@
armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW,
ResizeMethod resizeMethod = ResizeMethod::NearestNeighbor);
-} //namespace armnn
+} // namespace armnn
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index f745d81..5036389 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -5,7 +5,7 @@
#include "Softmax.hpp"
-#include <TensorUtils.hpp>
+#include <armnnUtils/TensorUtils.hpp>
#include <cmath>
#include <vector>
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.cpp b/src/backends/reference/workloads/SpaceToBatchNd.cpp
index 0bc2396..b6bab17 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.cpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.cpp
@@ -5,7 +5,7 @@
#include "SpaceToBatchNd.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
using namespace armnnUtils;
diff --git a/src/backends/reference/workloads/SpaceToDepth.cpp b/src/backends/reference/workloads/SpaceToDepth.cpp
index 4a4f418..604a905 100644
--- a/src/backends/reference/workloads/SpaceToDepth.cpp
+++ b/src/backends/reference/workloads/SpaceToDepth.cpp
@@ -5,7 +5,7 @@
#include "SpaceToDepth.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
using namespace armnnUtils;
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index c064072..e03c42f 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -7,9 +7,9 @@
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <DataLayoutIndexed.hpp>
+#include <boost/assert.hpp>
namespace armnn
{
diff --git a/src/backends/reference/workloads/TransposeConvolution2d.cpp b/src/backends/reference/workloads/TransposeConvolution2d.cpp
index 5662c58..5698014 100644
--- a/src/backends/reference/workloads/TransposeConvolution2d.cpp
+++ b/src/backends/reference/workloads/TransposeConvolution2d.cpp
@@ -5,7 +5,7 @@
#include "TransposeConvolution2d.hpp"
-#include <DataLayoutIndexed.hpp>
+#include <armnnUtils/DataLayoutIndexed.hpp>
namespace armnn
{