IVGCVSW-2264 Move DataLayoutIndexed to armnnUtils
* Since DataLayoutIndexed is now required in the TF parser, this
change moves it to the armnnUtils library so that it will be
accessible by the armnnTfParser
* Modified CMake files and Android.mk files accordingly
Change-Id: Ie2620359ef288aeff64cb9e9bec068a466eee0e9
diff --git a/src/backends/reference/workloads/BatchNormImpl.hpp b/src/backends/reference/workloads/BatchNormImpl.hpp
index fbcb2fd..799e7a3 100644
--- a/src/backends/reference/workloads/BatchNormImpl.hpp
+++ b/src/backends/reference/workloads/BatchNormImpl.hpp
@@ -10,6 +10,8 @@
#include <armnn/Tensor.hpp>
+#include <DataLayoutIndexed.hpp>
+
#include <cmath>
namespace armnn
@@ -34,7 +36,7 @@
outputData,
data.m_Parameters.m_DataLayout);
- DataLayoutIndexed dataLayout(data.m_Parameters.m_DataLayout);
+ armnnUtils::DataLayoutIndexed dataLayout(data.m_Parameters.m_DataLayout);
for (unsigned int c = 0; c < inputInfo.GetShape()[dataLayout.GetChannelsIndex()]; c++)
{
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
index 4313085..5f64213 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.cpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -11,6 +11,8 @@
#include <boost/assert.hpp>
+using namespace armnnUtils;
+
namespace armnn
{
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
index 5d932cc..f08df93 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.hpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -5,19 +5,21 @@
#pragma once
-#include <backendsCommon/DataLayoutIndexed.hpp>
+#include <armnn/Types.hpp>
+
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <armnn/Types.hpp>
+
+#include <DataLayoutIndexed.hpp>
namespace armnn
{
-void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
+void BatchToSpaceNd(const armnnUtils::DataLayoutIndexed& dataLayout,
const TensorInfo& inputTensorInfo,
const TensorInfo& outputTensorInfo,
const std::vector<unsigned int>& blockShape,
const std::vector<std::pair<unsigned int, unsigned int>>& cropsData,
const float* inputData,
float* outputData);
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 0b9f8f7..b8e2dea 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -10,7 +10,7 @@
#include <armnn/Tensor.hpp>
-#include <backendsCommon/DataLayoutIndexed.hpp>
+#include <DataLayoutIndexed.hpp>
#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -73,7 +73,7 @@
GetOutputTensorData<InputType>(0, data),
data.m_Parameters.m_DataLayout);
- const DataLayoutIndexed dataLayoutIndexed(data.m_Parameters.m_DataLayout);
+ const armnnUtils::DataLayoutIndexed dataLayoutIndexed(data.m_Parameters.m_DataLayout);
const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index d2fd0da..a9cac32 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -135,6 +135,8 @@
}
}
+using namespace armnnUtils;
+
namespace armnn
{
@@ -144,7 +146,7 @@
const TensorInfo& outputInfo,
const Pooling2dDescriptor& params)
{
- const armnn::DataLayoutIndexed dataLayout = params.m_DataLayout;
+ const DataLayoutIndexed dataLayout = params.m_DataLayout;
auto channelsIndex = dataLayout.GetChannelsIndex();
auto heightIndex = dataLayout.GetHeightIndex();
auto widthIndex = dataLayout.GetWidthIndex();
diff --git a/src/backends/reference/workloads/RefL2NormalizationFloat32Workload.cpp b/src/backends/reference/workloads/RefL2NormalizationFloat32Workload.cpp
index d21cfa9..bc82739 100644
--- a/src/backends/reference/workloads/RefL2NormalizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefL2NormalizationFloat32Workload.cpp
@@ -12,6 +12,8 @@
#include <cmath>
+using namespace armnnUtils;
+
namespace armnn
{
diff --git a/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp b/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp
index 4cec023..3a2f2b9 100644
--- a/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp
@@ -15,6 +15,8 @@
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>
+using namespace armnnUtils;
+
namespace armnn
{
diff --git a/src/backends/reference/workloads/ResizeBilinear.cpp b/src/backends/reference/workloads/ResizeBilinear.cpp
index e098c6c..2d1087c 100644
--- a/src/backends/reference/workloads/ResizeBilinear.cpp
+++ b/src/backends/reference/workloads/ResizeBilinear.cpp
@@ -12,6 +12,8 @@
#include <cmath>
#include <algorithm>
+using namespace armnnUtils;
+
namespace armnn
{
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
index 3da8851..814a0f2 100644
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ b/src/backends/reference/workloads/ResizeBilinear.hpp
@@ -7,15 +7,15 @@
#include <armnn/Tensor.hpp>
-#include <backendsCommon/DataLayoutIndexed.hpp>
+#include <DataLayoutIndexed.hpp>
namespace armnn
{
-void ResizeBilinear(const float* in,
- const TensorInfo& inputInfo,
- float* out,
- const TensorInfo& outputInfo,
- DataLayoutIndexed dataLayout = DataLayout::NCHW);
+void ResizeBilinear(const float* in,
+ const TensorInfo& inputInfo,
+ float* out,
+ const TensorInfo& outputInfo,
+ armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW);
} //namespace armnn
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.cpp b/src/backends/reference/workloads/SpaceToBatchNd.cpp
index 6d0d004..51e45a8 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.cpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.cpp
@@ -5,7 +5,9 @@
#include "SpaceToBatchNd.hpp"
-#include <backendsCommon/DataLayoutIndexed.hpp>
+#include <DataLayoutIndexed.hpp>
+
+using namespace armnnUtils;
namespace armnn
{
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index 5593ba6..aecec67 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -7,10 +7,10 @@
#include <armnn/Tensor.hpp>
-#include <backendsCommon/DataLayoutIndexed.hpp>
-
#include <boost/assert.hpp>
+#include <DataLayoutIndexed.hpp>
+
namespace armnn
{
@@ -19,7 +19,8 @@
class TensorBufferArrayView
{
public:
- TensorBufferArrayView(const TensorShape& shape, DataType* data, DataLayoutIndexed dataLayout = DataLayout::NCHW)
+ TensorBufferArrayView(const TensorShape& shape, DataType* data,
+ armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW)
: m_Shape(shape)
, m_Data(data)
, m_DataLayout(dataLayout)
@@ -60,9 +61,9 @@
}
private:
- const TensorShape m_Shape;
- DataType* m_Data;
- DataLayoutIndexed m_DataLayout;
+ const TensorShape m_Shape;
+ DataType* m_Data;
+ armnnUtils::DataLayoutIndexed m_DataLayout;
};
} //namespace armnn