IVGCVSW-2169 Remove DataLayoutIndexed from public API

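DataLayoutIndexed moves out of include/armnn/Types.hpp into
src/backends/backendsCommon, and descriptors in the public API now hold the
plain DataLayout enum instead. Backend and layer code constructs a
DataLayoutIndexed locally wherever dimension indices are needed. A minimal
sketch of that conversion pattern (illustrative only; the helper function
below is not part of this patch):

    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <backendsCommon/DataLayoutIndexed.hpp>

    // Hypothetical helper: read the channel count of a 4D tensor for a given layout.
    unsigned int GetChannelCount(const armnn::TensorShape& shape, armnn::DataLayout layout)
    {
        // DataLayoutIndexed is constructible from DataLayout, so the
        // conversion happens at the point of use rather than in the API.
        armnn::DataLayoutIndexed indexed(layout);
        return shape[indexed.GetChannelsIndex()];
    }
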
Change-Id: If8d8087d9d365e467d3ca9bf9c40d7219cb75cfd
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 0abc758..b705abe 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -198,7 +198,7 @@
     uint32_t            m_StrideY;
     OutputShapeRounding m_OutputShapeRounding;
     PaddingMethod       m_PaddingMethod;
-    DataLayoutIndexed   m_DataLayout;
+    DataLayout          m_DataLayout;
 };
 
 struct FullyConnectedDescriptor
@@ -286,7 +286,7 @@
         : m_DataLayout(DataLayout::NCHW)
     {}
 
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };
 
 struct BatchNormalizationDescriptor
@@ -297,7 +297,7 @@
     {}
 
     float m_Eps;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };
 
 struct BatchToSpaceNdDescriptor
@@ -316,7 +316,7 @@
 
     std::vector<unsigned int> m_BlockShape;
     std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };
 
 struct FakeQuantizationDescriptor
@@ -340,7 +340,7 @@
 
     uint32_t          m_TargetWidth;
     uint32_t          m_TargetHeight;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout        m_DataLayout;
 };
 
 struct ReshapeDescriptor
@@ -371,7 +371,7 @@
 
     std::vector<unsigned int> m_BlockShape;
     std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };
 
 // temporary descriptor for Lstm
@@ -455,7 +455,7 @@
     int32_t m_EllipsisMask;
     int32_t m_NewAxisMask;
 
-    DataLayoutIndexed m_DataLayout;
+    DataLayout m_DataLayout;
 };
 
 }
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index cd6e17b..d815005 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -31,56 +31,12 @@
     Signed32  = 3
 };
 
-// Begin: DataLayout
-
 enum class DataLayout
 {
     NCHW = 1,
     NHWC = 2
 };
 
-// Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout
-class DataLayoutIndexed
-{
-public:
-    DataLayoutIndexed(DataLayout dataLayout) : m_DataLayout(dataLayout)
-    {
-        switch (dataLayout)
-        {
-            case DataLayout::NHWC:
-                m_ChannelsIndex = 3;
-                m_HeightIndex   = 1;
-                m_WidthIndex    = 2;
-                break;
-            case DataLayout::NCHW:
-                m_ChannelsIndex = 1;
-                m_HeightIndex   = 2;
-                m_WidthIndex    = 3;
-                break;
-            default:
-                throw InvalidArgumentException("Unknown DataLayout value: " +
-                                               std::to_string(static_cast<int>(dataLayout)));
-        }
-    }
-
-    DataLayout   GetDataLayout()    const { return m_DataLayout; }
-    unsigned int GetChannelsIndex() const { return m_ChannelsIndex; }
-    unsigned int GetHeightIndex()   const { return m_HeightIndex; }
-    unsigned int GetWidthIndex()    const { return m_WidthIndex; }
-
-private:
-    DataLayout   m_DataLayout;
-    unsigned int m_ChannelsIndex;
-    unsigned int m_HeightIndex;
-    unsigned int m_WidthIndex;
-};
-
-// Conversion methods - implementations in src/armnn/InternalTypes.cpp
-bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed);
-bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout);
-
-// End: DataLayout
-
 enum class ActivationFunction
 {
     Sigmoid     = 0,
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index dade1f7..f37b1a0 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -51,16 +51,4 @@
     }
 }
 
-// Definition in include/armnn/Types.hpp
-bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed)
-{
-    return dataLayout == indexed.GetDataLayout();
-}
-
-// Definition in include/armnn/Types.hpp
-bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout)
-{
-    return indexed.GetDataLayout() == dataLayout;
-}
-
 }
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index aff818e..e1b78b2 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -10,6 +10,7 @@
 
 #include <armnn/TypesUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
@@ -50,7 +51,7 @@
 
 std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    const DataLayoutIndexed & dataLayout = m_Param.m_DataLayout;
+    const DataLayoutIndexed dataLayout = m_Param.m_DataLayout;
     const TensorShape& inputShape = inputShapes[0];
     unsigned int inBatchSize = inputShape[0];
     unsigned int channelSize = inputShape[dataLayout.GetChannelsIndex()];
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index f3597e2..4d3553f 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -8,6 +8,7 @@
 
 #include <armnn/TypesUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
 namespace armnn
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index f356e39..6ad32a7 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -8,6 +8,7 @@
 
 #include <armnn/TypesUtils.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
 namespace armnn
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 821c011..24b7a69 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -7,6 +7,7 @@
 #include "LayerCloneBase.hpp"
 
 #include <armnn/TypesUtils.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
@@ -33,13 +34,14 @@
 {
     BOOST_ASSERT(inputShapes.size() == 1);
     const TensorShape& inputShape = inputShapes[0];
+    const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
 
     // If we support multiple batch dimensions in the future, then this assert will need to change.
     BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
 
-    unsigned int inWidth = inputShape[m_Param.m_DataLayout.GetWidthIndex()];
-    unsigned int inHeight = inputShape[m_Param.m_DataLayout.GetHeightIndex()];
-    unsigned int inChannels = inputShape[m_Param.m_DataLayout.GetChannelsIndex()];
+    unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
+    unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
+    unsigned int inChannels = inputShape[dimensionIndices.GetChannelsIndex()];
     unsigned int inBatchSize = inputShape[0];
 
     bool isGlobalPooling = (m_Param.m_StrideX==0 && m_Param.m_StrideY==0);
diff --git a/src/armnn/layers/ResizeBilinearLayer.cpp b/src/armnn/layers/ResizeBilinearLayer.cpp
index 69ce69e..f72ccfc 100644
--- a/src/armnn/layers/ResizeBilinearLayer.cpp
+++ b/src/armnn/layers/ResizeBilinearLayer.cpp
@@ -7,6 +7,7 @@
 #include "LayerCloneBase.hpp"
 
 #include <armnn/TypesUtils.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
@@ -34,10 +35,10 @@
 {
     BOOST_ASSERT(inputShapes.size() == 1);
     const TensorShape& inputShape = inputShapes[0];
-
+    const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
     unsigned int outWidth = m_Param.m_TargetWidth;
     unsigned int outHeight = m_Param.m_TargetHeight;
-    unsigned int outChannels = inputShape[m_Param.m_DataLayout.GetChannelsIndex()];
+    unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()];
     unsigned int outBatch = inputShape[0];
 
     TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index cc93886..658945e 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -9,6 +9,7 @@
 
 #include <armnn/TypesUtils.hpp>
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
@@ -48,8 +49,9 @@
                                                      1U,
                                                      std::multiplies<>());
 
-    unsigned int heightIndex = m_Param.m_DataLayout.GetHeightIndex();
-    unsigned int widthIndex = m_Param.m_DataLayout.GetWidthIndex();
+    DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
+    unsigned int heightIndex = dimensionIndices.GetHeightIndex();
+    unsigned int widthIndex = dimensionIndices.GetWidthIndex();
 
     std::pair<unsigned int, unsigned int> heightPad = m_Param.m_PadList[0];
     std::pair<unsigned int, unsigned int> widthPad = m_Param.m_PadList[1];
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 1a9bd56..b8ba72f 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -8,6 +8,7 @@
 
 #include <boost/cast.hpp>
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
@@ -133,11 +134,11 @@
 
 template <typename BatchNormalizationFloat32Workload, armnn::DataType DataType>
 std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkloadTest(
-    armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayoutIndexed dataLayout = DataLayout::NCHW)
+    armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
 {
 
     TensorShape tensorShape;
-    switch (dataLayout.GetDataLayout())
+    switch (dataLayout)
     {
         case DataLayout::NHWC:
             tensorShape = { 2, 4, 4, 3 };
@@ -184,7 +185,7 @@
     BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
     BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
     BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout));
+    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -842,13 +843,12 @@
 template <typename ResizeBilinearWorkload, armnn::DataType DataType>
 std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                          armnn::Graph& graph,
-                                                                         DataLayoutIndexed dataLayout =
-                                                                             DataLayout::NCHW)
+                                                                         DataLayout dataLayout = DataLayout::NCHW)
 {
     TensorShape inputShape;
     TensorShape outputShape;
 
-    switch (dataLayout.GetDataLayout()) {
+    switch (dataLayout) {
         case DataLayout::NHWC:
             inputShape =  { 2, 4, 4, 3 };
             outputShape = { 2, 2, 2, 3 };
@@ -861,8 +861,9 @@
 
     // Creates the layer we're testing.
     ResizeBilinearDescriptor resizeDesc;
-    resizeDesc.m_TargetWidth = outputShape[dataLayout.GetWidthIndex()];
-    resizeDesc.m_TargetHeight = outputShape[dataLayout.GetHeightIndex()];
+    DataLayoutIndexed dimensionIndices = dataLayout;
+    resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
+    resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()];
     resizeDesc.m_DataLayout = dataLayout;
     Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
 
@@ -883,7 +884,7 @@
     ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
     BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
     BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout));
+    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 4501c35..238232a 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -216,9 +216,9 @@
                                 unsigned int numberOfChannels,
                                 unsigned int height,
                                 unsigned int width,
-                                const armnn::DataLayoutIndexed& dataLayout)
+                                const armnn::DataLayout dataLayout)
 {
-    switch (dataLayout.GetDataLayout())
+    switch (dataLayout)
     {
         case armnn::DataLayout::NCHW:
             return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
@@ -226,6 +226,6 @@
             return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
         default:
             throw armnn::InvalidArgumentException("unknown data layout ["
-                                                  + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
+                                                  + std::to_string(static_cast<int>(dataLayout)) + "]");
     }
 }
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt
index 1fe9888..f9bded76f 100644
--- a/src/backends/backendsCommon/CMakeLists.txt
+++ b/src/backends/backendsCommon/CMakeLists.txt
@@ -9,6 +9,8 @@
     CpuTensorHandle.cpp
     CpuTensorHandleFwd.hpp
     CpuTensorHandle.hpp
+    DataLayoutIndexed.hpp
+    DataLayoutIndexed.cpp
     IBackendInternal.hpp
     IBackendContext.hpp
     ILayerSupport.cpp
diff --git a/src/backends/backendsCommon/DataLayoutIndexed.cpp b/src/backends/backendsCommon/DataLayoutIndexed.cpp
new file mode 100644
index 0000000..b99d52c
--- /dev/null
+++ b/src/backends/backendsCommon/DataLayoutIndexed.cpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DataLayoutIndexed.hpp"
+
+namespace armnn
+{
+
+// Declaration in backendsCommon/DataLayoutIndexed.hpp
+bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed)
+{
+    return dataLayout == indexed.GetDataLayout();
+}
+
+// Declaration in backendsCommon/DataLayoutIndexed.hpp
+bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout)
+{
+    return indexed.GetDataLayout() == dataLayout;
+}
+
+}
diff --git a/src/backends/backendsCommon/DataLayoutIndexed.hpp b/src/backends/backendsCommon/DataLayoutIndexed.hpp
new file mode 100644
index 0000000..8547475
--- /dev/null
+++ b/src/backends/backendsCommon/DataLayoutIndexed.hpp
@@ -0,0 +1,53 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+#include <armnn/Exceptions.hpp>
+#include <armnn/Types.hpp>
+#include <string>
+
+namespace armnn
+{
+
+// Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout
+class DataLayoutIndexed
+{
+public:
+    DataLayoutIndexed(DataLayout dataLayout) : m_DataLayout(dataLayout)
+    {
+        switch (dataLayout)
+        {
+            case DataLayout::NHWC:
+                m_ChannelsIndex = 3;
+                m_HeightIndex   = 1;
+                m_WidthIndex    = 2;
+                break;
+            case DataLayout::NCHW:
+                m_ChannelsIndex = 1;
+                m_HeightIndex   = 2;
+                m_WidthIndex    = 3;
+                break;
+            default:
+                throw InvalidArgumentException("Unknown DataLayout value: " +
+                                               std::to_string(static_cast<int>(dataLayout)));
+        }
+    }
+
+    DataLayout   GetDataLayout()    const { return m_DataLayout; }
+    unsigned int GetChannelsIndex() const { return m_ChannelsIndex; }
+    unsigned int GetHeightIndex()   const { return m_HeightIndex; }
+    unsigned int GetWidthIndex()    const { return m_WidthIndex; }
+
+private:
+    DataLayout   m_DataLayout;
+    unsigned int m_ChannelsIndex;
+    unsigned int m_HeightIndex;
+    unsigned int m_WidthIndex;
+};
+
+// Equality methods
+bool operator==(const DataLayout& dataLayout, const DataLayoutIndexed& indexed);
+bool operator==(const DataLayoutIndexed& indexed, const DataLayout& dataLayout);
+
+}
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 18ab4a8..d5e3638 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -6,6 +6,8 @@
 
 #include "CpuTensorHandle.hpp"
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
+
 #include <algorithm>
 #include <iomanip>
 #include <string>
@@ -675,10 +677,11 @@
     }
 
     {
+        DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
         const unsigned int inputChannelCount =
-            workloadInfo.m_InputTensorInfos[0].GetShape()[this->m_Parameters.m_DataLayout.GetChannelsIndex()];
+            workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
         const unsigned int outputChannelCount =
-            workloadInfo.m_OutputTensorInfos[0].GetShape()[this->m_Parameters.m_DataLayout.GetChannelsIndex()];
+            workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
         if (inputChannelCount != outputChannelCount)
         {
             throw InvalidArgumentException(
@@ -774,14 +777,15 @@
     std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
     std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
 
-    unsigned int inputHeight = inputShape[m_Parameters.m_DataLayout.GetHeightIndex()]
+    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+    unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
                                + heightPad.first + heightPad.second;
 
-    unsigned int inputWidth = inputShape[m_Parameters.m_DataLayout.GetWidthIndex()]
+    unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
                               + widthPad.first + widthPad.second;
 
     unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
-                                    * inputShape[m_Parameters.m_DataLayout.GetChannelsIndex()];
+                                    * inputShape[dimensionIndices.GetChannelsIndex()];
 
     if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
     {
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 8d29316..c99dd39 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -9,6 +9,7 @@
 
 COMMON_SOURCES := \
     BackendRegistry.cpp \
+    DataLayoutIndexed.cpp \
     CpuTensorHandle.cpp \
     ILayerSupport.cpp \
     MemCopyWorkload.cpp \
diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
index d99b7f7..6685a8e 100755
--- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -14,6 +14,7 @@
 #include <test/TensorHelpers.hpp>
 #include "QuantizeHelper.hpp"
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
@@ -75,7 +76,7 @@
     const boost::multi_array<T, 4>& originalOutputExpected,
     float qScale,
     int32_t qOffset,
-    const armnn::DataLayoutIndexed& layout = armnn::DataLayout::NCHW,
+    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
     uint32_t padLeft = 0,
     uint32_t padTop = 0,
     uint32_t padRight = 0,
@@ -137,7 +138,7 @@
 
     // at this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -166,7 +167,7 @@
     outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
 
     // at this point if we require it permute the expected output
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(outputData.size());
         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data());
@@ -187,7 +188,7 @@
     armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
     // Permute the kernel if necessary
     boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data());
     }
@@ -210,7 +211,7 @@
     data.m_Parameters.m_PadTop = padTop;
     data.m_Parameters.m_PadBottom = padBottom;
     data.m_Parameters.m_BiasEnabled = biasEnabled;
-    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+    data.m_Parameters.m_DataLayout = layout;
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
     inputHandle->Allocate();
@@ -327,7 +328,7 @@
     const boost::multi_array<T, 4>& outputExpected,
     float qScale,
     int32_t qOffset,
-    const armnn::DataLayoutIndexed& layout,
+    const armnn::DataLayout layout,
     uint32_t padLeft = 0,
     uint32_t padTop = 0,
     uint32_t padRight = 0,
@@ -377,7 +378,7 @@
 
     // At this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -401,7 +402,7 @@
     LayerTestResult<T, 4> ret(outputTensorInfo);
 
     // At this point if we require it permute the expected output
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(outputData.size());
         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data());
@@ -417,7 +418,7 @@
 
     // Permute the kernel if necessary
     boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data());
     }
@@ -440,7 +441,7 @@
     data.m_Parameters.m_PadTop = padTop;
     data.m_Parameters.m_PadBottom = padBottom;
     data.m_Parameters.m_BiasEnabled = biasEnabled;
-    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+    data.m_Parameters.m_DataLayout = layout;
 
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -466,7 +467,7 @@
     float qScale,
     int32_t qOffset,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     unsigned int inputHeight = 3;
     unsigned int inputWidth = 3;
@@ -511,7 +512,7 @@
             }));
     // at this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -533,7 +534,7 @@
                     0.f, 0.f,  0.f,
                     -1.f, 0.f, -1.f,
             }));
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(kernelData.size());
         armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, kernelData.data(), tmp.data());
@@ -557,7 +558,7 @@
     }
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(outputImage.size());
         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data());
@@ -589,7 +590,7 @@
     data.m_Parameters.m_PadTop = 0;
     data.m_Parameters.m_PadBottom = 0;
     data.m_Parameters.m_BiasEnabled = biasEnabled;
-    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+    data.m_Parameters.m_DataLayout = layout;
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
     inputHandle->Allocate();
@@ -611,7 +612,7 @@
     float qScale,
     int32_t qOffset,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     unsigned int depthMultiplier = 2;
 
@@ -672,7 +673,7 @@
     std::vector<T> inputData = originalInputData;
     // at this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, originalInputData.data(), inputData.data());
     }
@@ -709,7 +710,7 @@
                     0, 0, 0
             }));
     std::vector<T> kernelData = originalKernelData;
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernelData.data(), kernelData.data());
     }
@@ -762,7 +763,7 @@
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
     std::vector<T> outputImage = originalOutputImage;
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, originalOutputImage.data(), outputImage.data());
     }
@@ -792,7 +793,7 @@
     data.m_Parameters.m_PadTop = 1;
     data.m_Parameters.m_PadBottom = 1;
     data.m_Parameters.m_BiasEnabled = biasEnabled;
-    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+    data.m_Parameters.m_DataLayout = layout;
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
     inputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index caa4f40..ecd09ca 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -109,7 +109,7 @@
     float qScale,
     int32_t qOffset,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     // Use common single-batch 3-channel 16x8 image.
     armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
@@ -192,7 +192,7 @@
     float qScale,
     int32_t qOffset,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
 
@@ -315,7 +315,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
 }
@@ -324,7 +324,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
@@ -333,7 +333,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
 }
@@ -355,7 +355,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
@@ -364,7 +364,7 @@
 LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout,
+    const armnn::DataLayout layout,
     float qScale,
     int32_t qOffset)
 {
@@ -426,7 +426,7 @@
 LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout,
+    const armnn::DataLayout layout,
     float qScale,
     int32_t qOffset)
 {
@@ -485,7 +485,7 @@
     float qScale,
     int32_t qOffset,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     // Use a single-batch 2-channel 5x5 image as input.
     armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
@@ -673,7 +673,7 @@
 Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
          workloadFactory, memoryManager, layout, 0.0f, 0);
@@ -682,7 +682,7 @@
 LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
         workloadFactory, memoryManager, layout, 0.0f, 0);
@@ -692,7 +692,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return DepthwiseConvolution2dTestImpl<float, float>(
         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
@@ -710,7 +710,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
@@ -720,7 +720,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return DepthwiseConvolution2dAsymmetricTestCommon<float>(
         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
@@ -730,7 +730,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
@@ -740,7 +740,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
@@ -775,7 +775,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
 }
@@ -784,13 +784,13 @@
     armnn::IWorkloadFactory&,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
     armnn::IWorkloadFactory&,
-    const armnn::DataLayoutIndexed&);
+    const armnn::DataLayout);
 
 template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
     armnn::IWorkloadFactory&,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
     armnn::IWorkloadFactory&,
-    const armnn::DataLayoutIndexed&);
+    const armnn::DataLayout);
 
 LayerTestResult<float,4> SimpleNormalizationAcrossTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -3857,7 +3857,7 @@
 LayerTestResult<float, 4> ResizeBilinearNopTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
     const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
@@ -3875,7 +3875,7 @@
     });
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<float> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -3911,7 +3911,7 @@
 LayerTestResult<float, 4> SimpleResizeBilinearTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
     const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
@@ -3937,7 +3937,7 @@
     });
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<float> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -3977,7 +3977,7 @@
 LayerTestResult<float, 4> ResizeBilinearSqMinTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
     const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
@@ -4003,7 +4003,7 @@
     });
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<float> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -4043,7 +4043,7 @@
 LayerTestResult<float, 4> ResizeBilinearMinTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
     const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
@@ -4067,7 +4067,7 @@
     });
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<float> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -4107,7 +4107,7 @@
 LayerTestResult<float, 4> ResizeBilinearMagTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
     const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
@@ -4133,7 +4133,7 @@
     });
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<float> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -4235,7 +4235,7 @@
     const armnn::TensorShape& inputOutputTensorShape,
     const std::vector<float>& inputValues,
     const std::vector<float>& expectedOutputValues,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
@@ -4243,7 +4243,7 @@
     // at this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     std::vector<float> inputData = inputValues;
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<float> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -4254,7 +4254,7 @@
 
     LayerTestResult<float, 4> result(outputTensorInfo);
     std::vector<float> expectedOutputData = expectedOutputValues;
-    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (layout == armnn::DataLayout::NHWC)
     {
         std::vector<float> tmp(expectedOutputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data());
@@ -4266,7 +4266,7 @@
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::L2NormalizationQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = layout.GetDataLayout();
+    descriptor.m_Parameters.m_DataLayout = layout;
     armnn::WorkloadInfo info;
 
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
@@ -4729,7 +4729,7 @@
 LayerTestResult<float, 4> L2Normalization1dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     // Width: 1
     // Height: 1
@@ -4799,7 +4799,7 @@
 LayerTestResult<float, 4> L2Normalization2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     // Width: 5
     // Height: 1
@@ -4844,7 +4844,7 @@
 LayerTestResult<float, 4> L2Normalization3dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
     // Width: 3
     // Height: 4
@@ -4909,7 +4909,7 @@
 LayerTestResult<float, 4> L2Normalization4dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout)
+    const armnn::DataLayout layout)
 {
         // Width: 3
     // Height: 4
@@ -6357,7 +6357,7 @@
 LayerTestResult<float, 4> SimpleMaxPooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
 }
@@ -6365,7 +6365,7 @@
 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
 }
@@ -6373,7 +6373,7 @@
 LayerTestResult<float, 4> SimpleAveragePooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
 }
@@ -6381,7 +6381,7 @@
 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     return SimpleAveragePooling2dTestCommon<uint8_t>(
         workloadFactory, memoryManager, dataLayout, 0.5, -1);
@@ -6413,7 +6413,7 @@
 LayerTestResult<float, 4> SimpleL2Pooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
 }
@@ -6421,7 +6421,7 @@
 LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout)
+    const armnn::DataLayout dataLayout)
 {
     return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
 }
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 15d0853..498cfb7 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -58,13 +58,13 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -75,12 +75,12 @@
 Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float,   4> Convolution1dTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -96,7 +96,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -107,13 +107,13 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float,   4> SimpleMaxPooling2dSize2x2Stride2x2Test(
     armnn::IWorkloadFactory& workloadFactory,
@@ -154,22 +154,22 @@
 LayerTestResult<float,   4> SimpleMaxPooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 LayerTestResult<float,   4> SimpleAveragePooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 LayerTestResult<float,   4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
     armnn::IWorkloadFactory& workloadFactory,
@@ -203,12 +203,12 @@
 LayerTestResult<float,   4> SimpleL2Pooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 LayerTestResult<float,   4> L2Pooling2dSize3Stride1Test(
     armnn::IWorkloadFactory& workloadFactory,
@@ -464,7 +464,7 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> CompareNormalizationTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -606,32 +606,32 @@
 LayerTestResult<float, 4> ResizeBilinearNopTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 // Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image.
 LayerTestResult<float, 4> SimpleResizeBilinearTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for minification of a square input matrix (also: input dimensions are a
 // multiple of output dimensions).
 LayerTestResult<float, 4> ResizeBilinearSqMinTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for minification (output dimensions smaller than input dimensions).
 LayerTestResult<float, 4> ResizeBilinearMinTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
 LayerTestResult<float, 4> ResizeBilinearMagTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout);
+    const armnn::DataLayout dataLayout);
 
 LayerTestResult<float, 4> BatchNormTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -648,22 +648,22 @@
 LayerTestResult<float, 4> L2Normalization1dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> L2Normalization2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> L2Normalization3dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> L2Normalization4dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> ConstantTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -765,25 +765,25 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
-    const armnn::DataLayoutIndexed& layout);
+    const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
index 2e851fa..9050fc6 100644
--- a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
@@ -34,10 +34,11 @@
     const boost::multi_array<T, 4>& input,
     const boost::multi_array<T, 4>& outputExpected)
 {
-    const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout;
-    auto heightIndex = dataLayout.GetHeightIndex();
-    auto widthIndex = dataLayout.GetWidthIndex();
-    auto channelsIndex = dataLayout.GetChannelsIndex();
+    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
+    const armnn::DataLayoutIndexed dimensionIndices = dataLayout;
+    auto heightIndex = dimensionIndices.GetHeightIndex();
+    auto widthIndex = dimensionIndices.GetWidthIndex();
+    auto channelsIndex = dimensionIndices.GetChannelsIndex();
 
     unsigned int inputHeight     = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
     unsigned int inputWidth      = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
@@ -240,7 +241,7 @@
 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW,
+    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
@@ -286,7 +287,7 @@
         }));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -309,7 +310,7 @@
 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
+    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
@@ -355,7 +356,7 @@
         }));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
@@ -429,7 +430,7 @@
 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
+    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
@@ -466,7 +467,7 @@
         }));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    if (dataLayout == armnn::DataLayout::NHWC)
     {
         std::vector<T> tmp(inputData.size());
         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
diff --git a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
index c0f6cdf..fa0be85 100644
--- a/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClBatchNormalizationFloatWorkload.cpp
@@ -23,20 +23,18 @@
                                                  const TensorInfo& gamma,
                                                  const BatchNormalizationDescriptor &desc)
 {
-    const DataLayout dataLayout = desc.m_DataLayout.GetDataLayout();
-
     const arm_compute::TensorInfo aclInputInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(input, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(input, desc.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(output, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(output, desc.m_DataLayout);
     const arm_compute::TensorInfo aclMeanInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(mean, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(mean, desc.m_DataLayout);
     const arm_compute::TensorInfo aclVarInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(var, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(var, desc.m_DataLayout);
     const arm_compute::TensorInfo aclBetaInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(beta, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(beta, desc.m_DataLayout);
     const arm_compute::TensorInfo aclGammaInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(gamma, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(gamma, desc.m_DataLayout);
 
     return arm_compute::CLBatchNormalizationLayer::validate(&aclInputInfo,
                                                             &aclOutputInfo,
@@ -68,7 +66,7 @@
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
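The same simplification repeats across the CL and Neon workloads below: because the descriptor now stores the enum itself, it can be forwarded straight to BuildArmComputeTensorInfo and ConvertDataLayout with no GetDataLayout() hop. A rough sketch of the resulting shape of the Configure() bodies, assuming the same using-declarations the workload sources already rely on for the unqualified ConvertDataLayout call (SetAclLayouts is a hypothetical name, not an ArmNN function):

    // Illustrative only: forward the enum from the descriptor as-is.
    template <typename THandle>
    void SetAclLayouts(THandle& input, THandle& output, armnn::DataLayout layout)
    {
        // ConvertDataLayout is the same helper the workloads in this patch call.
        arm_compute::DataLayout aclLayout = ConvertDataLayout(layout);
        input.info()->set_data_layout(aclLayout);
        output.info()->set_data_layout(aclLayout);
    }
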
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index 74e40ec..2226e09 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -18,12 +18,10 @@
                                                       const TensorInfo& output,
                                                       const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input,
-                                                                        descriptor.m_DataLayout.GetDataLayout());
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output,
-                                                                        descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
-    unsigned int axis = (descriptor.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;
+    unsigned int axis = (descriptor.m_DataLayout == DataLayout::NCHW) ? 2 : 0;
 
     return arm_compute::CLL2NormalizeLayer::validate(&aclInput, &aclOutput, axis);
 }
@@ -37,11 +35,11 @@
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
-    unsigned int axis = (m_Data.m_Parameters.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;
+    unsigned int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0;
 
     m_Layer.configure(&input, &output, axis);
 }
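One detail worth noting in the L2 normalization workloads: the axis handed to CLL2NormalizeLayer appears to be expressed in Arm Compute Library's innermost-first dimension numbering, so the ternary above selects the channel dimension for either layout. A small illustrative helper capturing that reading (hypothetical name, sketch only):

    // NCHW is stored W,H,C,N in ACL's innermost-first order, so channels sit at
    // index 2; NHWC is stored C,W,H,N, so channels sit at index 0.
    unsigned int AclChannelAxis(armnn::DataLayout layout)
    {
        return (layout == armnn::DataLayout::NCHW) ? 2u : 0u;
    }
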
diff --git a/src/backends/cl/workloads/ClPooling2dWorkload.cpp b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
index f4b0356..607bc58 100644
--- a/src/backends/cl/workloads/ClPooling2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClPooling2dWorkload.cpp
@@ -19,10 +19,8 @@
     const TensorInfo& output,
     const Pooling2dDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInputInfo =
-            BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
-    const arm_compute::TensorInfo aclOutputInfo =
-            BuildArmComputeTensorInfo(output, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);
 
@@ -38,7 +36,7 @@
     arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
index 3e2f895..ac7d60c 100644
--- a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
@@ -26,7 +26,7 @@
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
index e576c64..a8181f6 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
@@ -21,20 +21,18 @@
                                                    const TensorInfo& gamma,
                                                    const BatchNormalizationDescriptor& descriptor)
 {
-    const DataLayout dataLayout = descriptor.m_DataLayout.GetDataLayout();
-
     const arm_compute::TensorInfo aclInputInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(input, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(output, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclMeanInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(mean, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclVarInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(var, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclBetaInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(beta, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclGammaInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(gamma, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout);
 
     return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
                                                             &aclOutputInfo,
@@ -54,7 +52,7 @@
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index ca3b36e..df8caef 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -14,12 +14,11 @@
                                                         const TensorInfo& output,
                                                         const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(
-                                                  output, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::NormalizationLayerInfo normalizationInfo =
-            CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout.GetDataLayout());
+            CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
 
     return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
 }
@@ -34,14 +33,14 @@
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
     m_Layer.configure(&input,
                       &output,
                       CreateAclNormalizationLayerInfoForL2Normalization(
-                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout.GetDataLayout()));
+                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout));
 }
 
 void NeonL2NormalizationFloatWorkload::Execute() const
diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
index b8acf36..9c8f71a 100644
--- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
@@ -18,9 +18,9 @@
     const Pooling2dDescriptor& descriptor)
 {
     const arm_compute::TensorInfo aclInputInfo =
-            BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
+            BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-            BuildArmComputeTensorInfo(output, descriptor.m_DataLayout.GetDataLayout());
+            BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);
 
@@ -36,7 +36,7 @@
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.hpp b/src/backends/reference/workloads/BatchToSpaceNd.hpp
index 091d092..5d932cc 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.hpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
 #include <backendsCommon/Workload.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <armnn/Types.hpp>
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 4b15c1d..0b9f8f7 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -10,6 +10,8 @@
 
 #include <armnn/Tensor.hpp>
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
+
 #include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
index 92b229d..3da8851 100644
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ b/src/backends/reference/workloads/ResizeBilinear.hpp
@@ -7,6 +7,8 @@
 
 #include <armnn/Tensor.hpp>
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
+
 namespace armnn
 {
 
diff --git a/src/backends/reference/workloads/SpaceToBatchNd.cpp b/src/backends/reference/workloads/SpaceToBatchNd.cpp
index 48c2127..6d0d004 100644
--- a/src/backends/reference/workloads/SpaceToBatchNd.cpp
+++ b/src/backends/reference/workloads/SpaceToBatchNd.cpp
@@ -5,6 +5,8 @@
 
 #include "SpaceToBatchNd.hpp"
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
+
 namespace armnn
 {
 
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index 53504d6..5593ba6 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -7,6 +7,8 @@
 
 #include <armnn/Tensor.hpp>
 
+#include <backendsCommon/DataLayoutIndexed.hpp>
+
 #include <boost/assert.hpp>
 
 namespace armnn
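
The reference-backend headers above gain an explicit include because DataLayoutIndexed no longer arrives via armnn/Types.hpp. A rough sketch, not the actual TensorBufferArrayView implementation, of the kind of indexing these workloads do with the relocated helper:

    #include <armnn/Tensor.hpp>
    #include <backendsCommon/DataLayoutIndexed.hpp>

    // Illustrative only: map logical N/C/H/W coordinates to a flat, row-major
    // offset for either layout. The batch dimension is first in both layouts.
    unsigned int FlatIndex(const armnn::TensorShape& shape,
                           const armnn::DataLayoutIndexed& layout,
                           unsigned int n, unsigned int c, unsigned int h, unsigned int w)
    {
        unsigned int coords[4] = { n, 0, 0, 0 };
        coords[layout.GetChannelsIndex()] = c;
        coords[layout.GetHeightIndex()]   = h;
        coords[layout.GetWidthIndex()]    = w;

        return ((coords[0] * shape[1] + coords[1]) * shape[2] + coords[2]) * shape[3] + coords[3];
    }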