IVGCVSW-3738 Add end-to-end layer test for DepthToSpace

* Added end-to-end layer test implementation for DepthToSpace
* Added tests to the reference, CL and NEON backends for all supported
  data types and data layouts (see the usage sketch below)
* Extracted common data permutation code into a new utility file and
  refactored some existing tests to reduce code duplication
* Fixed EndToEndLayerTestImpl template to work with Float16 data
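
A minimal sketch of how the new helper is consumed by a backend test
suite (the backend-side hunks are not part of this diff; the test case
names and the defaultBackends variable below are illustrative, following
the pattern already used in files such as RefEndToEndTests.cpp):

    // RefEndToEndTests.cpp (sketch, not part of this patch)
    #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>

    std::vector<armnn::BackendId> defaultBackends = { armnn::Compute::CpuRef };

    BOOST_AUTO_TEST_CASE(RefDepthToSpaceEndToEndNchwFloat32Test)
    {
        DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
    }

    BOOST_AUTO_TEST_CASE(RefDepthToSpaceEndToEndNhwcFloat16Test)
    {
        DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
    }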

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Iaf7a0012c520451052b20c37e36dc05fa8314ff6
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 481d7d8..034fe4c 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -10,7 +10,9 @@
     BackendRegistryTests.cpp
     CommonTestUtils.cpp
     CommonTestUtils.hpp
+    DataLayoutUtils.hpp
     DataTypeUtils.hpp
+    DepthToSpaceEndToEndTestImpl.hpp
     DequantizeEndToEndTestImpl.hpp
     DetectionPostProcessEndToEndTestImpl.hpp
     DynamicBackendTests.cpp
@@ -34,6 +36,7 @@
     QuantizedLstmEndToEndTestImpl.hpp
     ResizeEndToEndTestImpl.hpp
     RuntimeTestImpl.hpp
+    SpaceToDepthEndToEndTestImpl.cpp
     SpaceToDepthEndToEndTestImpl.hpp
     SplitterEndToEndTestImpl.hpp
     TensorCopyUtils.cpp
diff --git a/src/backends/backendsCommon/test/DataLayoutUtils.hpp b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
new file mode 100644
index 0000000..f893258
--- /dev/null
+++ b/src/backends/backendsCommon/test/DataLayoutUtils.hpp
@@ -0,0 +1,36 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Permute.hpp>
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+
+template<typename T>
+void PermuteTensorNchwToNhwc(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, nchwToNhwc);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), nchwToNhwc, tensorData.data(), tmp.data(), sizeof(T));
+    tensorData = tmp;
+}
+
+template<typename T>
+void PermuteTensorNhwcToNchw(armnn::TensorInfo& tensorInfo, std::vector<T>& tensorData)
+{
+    const armnn::PermutationVector nhwcToNchw = { 0, 2, 3, 1 };
+
+    tensorInfo = armnnUtils::Permuted(tensorInfo, nhwcToNchw);
+
+    std::vector<T> tmp(tensorData.size());
+    armnnUtils::Permute(tensorInfo.GetShape(), nhwcToNchw, tensorData.data(), tmp.data(), sizeof(T));
+
+    tensorData = tmp;
+}
diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
new file mode 100644
index 0000000..cf4db1d
--- /dev/null
+++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+#include <backendsCommon/test/QuantizeHelper.hpp>
+
+namespace
+{
+
+armnn::INetworkPtr CreateDepthToSpaceNetwork(const armnn::TensorInfo& inputInfo,
+                                             const armnn::TensorInfo& outputInfo,
+                                             const armnn::DepthToSpaceDescriptor& descriptor)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+
+    IConnectableLayer* input        = network->AddInputLayer(0, "input");
+    IConnectableLayer* depthToSpace = network->AddDepthToSpaceLayer(descriptor, "depthToSpace");
+    IConnectableLayer* output       = network->AddOutputLayer(0, "output");
+
+    Connect(input, depthToSpace, inputInfo, 0, 0);
+    Connect(depthToSpace, output, outputInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void DepthToSpaceEndToEndImpl(const std::vector<armnn::BackendId>& backends,
+                              const armnn::DepthToSpaceDescriptor& descriptor,
+                              const armnn::TensorShape& nhwcInputShape,
+                              const armnn::TensorShape& nhwcOutputShape,
+                              const std::vector<float>& floatInputData,
+                              const std::vector<float>& floatExpectedOutputData)
+{
+    using namespace armnn;
+
+    TensorInfo inputInfo(nhwcInputShape, ArmnnType);
+    TensorInfo outputInfo(nhwcOutputShape, ArmnnType);
+
+    constexpr float   qScale  = 0.25f;
+    constexpr int32_t qOffset = 128;
+
+    // Set quantization parameters for quantized types
+    if (IsQuantizedType<T>())
+    {
+        inputInfo.SetQuantizationScale(qScale);
+        inputInfo.SetQuantizationOffset(qOffset);
+        outputInfo.SetQuantizationScale(qScale);
+        outputInfo.SetQuantizationOffset(qOffset);
+    }
+
+    std::vector<T> inputData          = QuantizedVector<T>(qScale, qOffset, floatInputData);
+    std::vector<T> expectedOutputData = QuantizedVector<T>(qScale, qOffset, floatExpectedOutputData);
+
+    // Permute tensors from NHWC to NCHW (if needed)
+    if (descriptor.m_DataLayout == DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw(inputInfo, inputData);
+        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
+    }
+
+    INetworkPtr network = CreateDepthToSpaceNetwork(inputInfo, outputInfo, descriptor);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                { { 0, inputData } },
+                                                { { 0, expectedOutputData } },
+                                                backends);
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType>
+void DepthToSpaceEndToEnd(const std::vector<armnn::BackendId>& defaultBackends,
+                          armnn::DataLayout dataLayout)
+{
+    using namespace armnn;
+
+    TensorShape inputShape  = { 2, 2, 2, 4 };
+    TensorShape outputShape = { 2, 4, 4, 1 };
+
+    std::vector<float> inputData =
+    {
+         1.f,  2.f,  3.f,  4.f,
+         5.f,  6.f,  7.f,  8.f,
+         9.f, 10.f, 11.f, 12.f,
+        13.f, 14.f, 15.f, 16.f,
+
+        17.f, 18.f, 19.f, 20.f,
+        21.f, 22.f, 23.f, 24.f,
+        25.f, 26.f, 27.f, 28.f,
+        29.f, 30.f, 31.f, 32.f
+    };
+
+    std::vector<float> expectedOutputData =
+    {
+         1.f,  2.f,  5.f,  6.f,
+         3.f,  4.f,  7.f,  8.f,
+         9.f, 10.f, 13.f, 14.f,
+        11.f, 12.f, 15.f, 16.f,
+
+        17.f, 18.f, 21.f, 22.f,
+        19.f, 20.f, 23.f, 24.f,
+        25.f, 26.f, 29.f, 30.f,
+        27.f, 28.f, 31.f, 32.f
+    };
+
+    DepthToSpaceEndToEndImpl<ArmnnType>(defaultBackends,
+                                        DepthToSpaceDescriptor(2, dataLayout),
+                                        inputShape,
+                                        outputShape,
+                                        inputData,
+                                        expectedOutputData);
+}
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 1577e13..efaffb9 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -105,11 +105,23 @@
     );
 }
 
-template<typename T>
-bool CompareBoolean(T a, T b)
+// Utility template for comparing tensor elements
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
+bool Compare(T a, T b)
 {
-    return (a == 0 && b == 0) ||(a != 0 && b != 0);
-};
+    if (ArmnnType == DataType::Boolean)
+    {
+        // NOTE: Boolean is represented as uint8_t (zero means false and
+        // any non-zero value means true), therefore values need to be
+        // cast to bool before comparing them
+        return static_cast<bool>(a) == static_cast<bool>(b);
+    }
+
+    // NOTE: All other types can be cast to float and compared with
+    // a certain level of tolerance
+    constexpr float tolerance = 0.000001f;
+    return std::fabs(static_cast<float>(a) - static_cast<float>(b)) <= tolerance;
+}
 
 // Utility function to find the number of instances of a substring within a string.
 int SubStringCounter(std::string& string, std::string&& substring)
@@ -170,19 +182,9 @@
     for (auto&& it : expectedOutputData)
     {
         std::vector<TOutput> out = outputStorage.at(it.first);
-        if (ArmnnOType == DataType::Boolean)
+        for (unsigned int i = 0; i < out.size(); ++i)
         {
-            for (unsigned int i = 0; i < out.size(); ++i)
-            {
-                BOOST_TEST(CompareBoolean<TOutput>(it.second[i], out[i]));
-            }
-        }
-        else
-        {
-            for (unsigned int i = 0; i < out.size(); ++i)
-            {
-                BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
-            }
+            BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i]) == true);
         }
     }
 }
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
new file mode 100644
index 0000000..8eb1c97
--- /dev/null
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -0,0 +1,222 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpaceToDepthEndToEndTestImpl.hpp"
+
+#include "ResolveType.hpp"
+#include "DataLayoutIndexed.hpp"
+#include "EndToEndTestImpl.hpp"
+
+#include <Permute.hpp>
+
+#include <armnn/INetwork.hpp>
+
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+
+#include <test/TestUtils.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+namespace
+{
+
+template<typename armnn::DataType DataType>
+armnn::INetworkPtr CreateSpaceToDepthNetwork(const armnn::TensorShape& inputShape,
+                                             const armnn::TensorShape& outputShape,
+                                             const armnn::DataLayout dataLayout,
+                                             unsigned int blockSize,
+                                             const float qScale = 1.0f,
+                                             const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+
+    armnnUtils::DataLayoutIndexed dimensionIndices(dataLayout);
+    if (inputShape[dimensionIndices.GetHeightIndex()] % blockSize!=0
+        || inputShape[dimensionIndices.GetWidthIndex()] % blockSize!=0)
+    {
+        throw InvalidArgumentException("Input shape must be divisible by block size in all spatial dimensions");
+    }
+
+    SpaceToDepthDescriptor spaceToDepthDesc;
+    spaceToDepthDesc.m_BlockSize = blockSize;
+    spaceToDepthDesc.m_DataLayout = dataLayout;
+
+    IConnectableLayer* SpaceToDepth = net->AddSpaceToDepthLayer(spaceToDepthDesc, "SpaceToDepth");
+    IConnectableLayer* input        = net->AddInputLayer(0, "input");
+    Connect(input, SpaceToDepth, inputTensorInfo, 0, 0);
+
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+    Connect(SpaceToDepth, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
+void SpaceToDepthEndToEnd(const std::vector<armnn::BackendId>& backends,
+                          const armnn::DataLayout& dataLayout,
+                          armnn::TensorInfo& inputTensorInfo,
+                          armnn::TensorInfo& outputTensorInfo,
+                          std::vector<float>& inputData,
+                          std::vector<float>& expectedOutputData,
+                          const unsigned int blockSize)
+{
+    using namespace armnn;
+
+    if (dataLayout == DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw<float>(inputTensorInfo, inputData);
+        PermuteTensorNhwcToNchw<float>(outputTensorInfo, expectedOutputData);
+    }
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateSpaceToDepthNetwork<DataType::Float32>(
+            inputTensorInfo.GetShape(),
+            outputTensorInfo.GetShape(),
+            dataLayout,
+            blockSize);
+
+    BOOST_TEST_CHECKPOINT("Create a network");
+
+    std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
+    std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
+
+    EndToEndLayerTestImpl<DataType::Float32, DataType::Float32>(
+            move(net),
+            inputTensorData,
+            expectedOutputTensorData,
+            backends);
+}
+
+} // anonymous namespace
+
+void SpaceToDepthNhwcEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const unsigned int blockSize = 2;
+
+    TensorShape inputShape{1, 2, 2, 1};
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
+    TensorShape outputShape{1, 1, 1, 4};
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        1.0f, 2.0f, 3.0f, 4.0f
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        1.0f, 2.0f, 3.0f, 4.0f
+    });
+
+    SpaceToDepthEndToEnd(defaultBackends,
+                         DataLayout::NHWC,
+                         inputTensorInfo,
+                         outputTensorInfo,
+                         inputData,
+                         expectedOutputData,
+                         blockSize);
+}
+
+void SpaceToDepthNchwEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const unsigned int blockSize = 2;
+
+    TensorShape inputShape{1, 2, 2, 1};
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
+    TensorShape outputShape{1, 1, 1, 4};
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        1.0f, 2.0f, 3.0f, 4.0f
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        1.0f, 2.0f, 3.0f, 4.0f
+    });
+
+    SpaceToDepthEndToEnd(defaultBackends,
+                         DataLayout::NCHW,
+                         inputTensorInfo,
+                         outputTensorInfo,
+                         inputData,
+                         expectedOutputData,
+                         blockSize);
+}
+
+void SpaceToDepthNhwcEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const unsigned int blockSize = 2;
+
+    TensorShape inputShape{1, 2, 2, 2};
+    TensorShape outputShape{1, 1, 1, 8};
+
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+    });
+
+    SpaceToDepthEndToEnd(defaultBackends,
+                         DataLayout::NHWC,
+                         inputTensorInfo,
+                         outputTensorInfo,
+                         inputData,
+                         expectedOutputData,
+                         blockSize);
+}
+
+void SpaceToDepthNchwEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
+{
+    using namespace armnn;
+
+    const unsigned int blockSize = 2;
+
+    TensorShape inputShape{1, 2, 2, 2};
+    TensorShape outputShape{1, 1, 1, 8};
+
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32);
+
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+    });
+
+    SpaceToDepthEndToEnd(defaultBackends,
+                         DataLayout::NCHW,
+                         inputTensorInfo,
+                         outputTensorInfo,
+                         inputData,
+                         expectedOutputData,
+                         blockSize);
+}
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp
index fd442a8..e765c41 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp
@@ -2,225 +2,17 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
-#include "ResolveType.hpp"
-#include "DataLayoutIndexed.hpp"
-#include "EndToEndTestImpl.hpp"
-
-#include "armnn/INetwork.hpp"
-
-#include "backendsCommon/test/CommonTestUtils.hpp"
-
-#include <Permute.hpp>
-#include <boost/test/unit_test.hpp>
+#include <armnn/BackendId.hpp>
 
 #include <vector>
 
-namespace
-{
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-void PermuteDataToNCHW(const std::vector<armnn::BackendId>& backends,
-                       const armnn::DataLayout& dataLayout,
-                       TensorInfo& tensorInfo,
-                       std::vector<T>& data)
-{
-    const armnn::PermutationVector NHWCToNCHW = {0, 2, 3, 1};
+void SpaceToDepthNhwcEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends);
 
-    tensorInfo = armnnUtils::Permuted(tensorInfo, NHWCToNCHW);
+void SpaceToDepthNchwEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends);
 
-    std::vector<T> tmp(data.size());
-    armnnUtils::Permute(tensorInfo.GetShape(), NHWCToNCHW, data.data(), tmp.data(), sizeof(T));
+void SpaceToDepthNhwcEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends);
 
-    data = tmp;
-}
-
-template<typename armnn::DataType DataType>
-armnn::INetworkPtr CreateSpaceToDepthNetwork(const armnn::TensorShape& inputShape,
-                                             const armnn::TensorShape& outputShape,
-                                             const armnn::DataLayout dataLayout,
-                                             unsigned int blockSize,
-                                             const float qScale = 1.0f,
-                                             const int32_t qOffset = 0)
-{
-    using namespace armnn;
-    // Builds up the structure of the network.
-    INetworkPtr net(INetwork::Create());
-
-    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
-
-    armnnUtils::DataLayoutIndexed dimensionIndices(dataLayout);
-    if (inputShape[dimensionIndices.GetHeightIndex()] % blockSize!=0
-        || inputShape[dimensionIndices.GetWidthIndex()] % blockSize!=0)
-    {
-        throw InvalidArgumentException("Input shape must be divisible by block size in all spatial dimensions");
-    }
-
-    SpaceToDepthDescriptor spaceToDepthDesc;
-    spaceToDepthDesc.m_BlockSize = blockSize;
-    spaceToDepthDesc.m_DataLayout = dataLayout;
-
-    IConnectableLayer* SpaceToDepth = net->AddSpaceToDepthLayer(spaceToDepthDesc, "SpaceToDepth");
-    IConnectableLayer* input        = net->AddInputLayer(0, "input");
-    Connect(input, SpaceToDepth, inputTensorInfo, 0, 0);
-
-    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
-    IConnectableLayer* output = net->AddOutputLayer(0, "output");
-    Connect(SpaceToDepth, output, outputTensorInfo, 0, 0);
-
-    return net;
-}
-
-void SpaceToDepthEndToEnd(const std::vector<armnn::BackendId>& backends,
-                          const armnn::DataLayout& dataLayout,
-                          TensorInfo& inputTensorInfo,
-                          TensorInfo& outputTensorInfo,
-                          std::vector<float>& inputData,
-                          std::vector<float>& expectedOutputData,
-                          const unsigned int blockSize)
-{
-    using namespace armnn;
-
-    if (dataLayout == armnn::DataLayout::NCHW)
-    {
-        PermuteDataToNCHW<armnn::DataType::Float32>(backends, dataLayout, inputTensorInfo, inputData);
-        PermuteDataToNCHW<armnn::DataType::Float32>(backends, dataLayout, outputTensorInfo, expectedOutputData);
-    }
-
-    // Builds up the structure of the network
-    INetworkPtr net = CreateSpaceToDepthNetwork<armnn::DataType::Float32>(
-            inputTensorInfo.GetShape(),
-            outputTensorInfo.GetShape(),
-            dataLayout,
-            blockSize);
-
-    BOOST_TEST_CHECKPOINT("Create a network");
-
-    std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
-    std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
-
-    EndToEndLayerTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
-            move(net),
-            inputTensorData,
-            expectedOutputTensorData,
-            backends);
-}
-
-void SpaceToDepthNHWCEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
-{
-    const unsigned int blockSize = 2;
-
-    armnn::TensorShape inputShape{1, 2, 2, 1};
-    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
-
-    armnn::TensorShape outputShape{1, 1, 1, 4};
-    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
-
-    std::vector<float> inputData = std::vector<float>(
-    {
-        1.0f, 2.0f, 3.0f, 4.0f
-    });
-
-    std::vector<float> expectedOutputData = std::vector<float>(
-    {
-        1.0f, 2.0f, 3.0f, 4.0f
-    });
-
-    SpaceToDepthEndToEnd(defaultBackends,
-                         armnn::DataLayout::NHWC,
-                         inputTensorInfo,
-                         outputTensorInfo,
-                         inputData,
-                         expectedOutputData,
-                         blockSize);
-}
-
-void SpaceToDepthNCHWEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
-{
-    const unsigned int blockSize = 2;
-
-    armnn::TensorShape inputShape{1, 2, 2, 1};
-    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
-
-    armnn::TensorShape outputShape{1, 1, 1, 4};
-    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
-
-    std::vector<float> inputData = std::vector<float>(
-    {
-        1.0f, 2.0f, 3.0f, 4.0f
-    });
-
-    std::vector<float> expectedOutputData = std::vector<float>(
-    {
-        1.0f, 2.0f, 3.0f, 4.0f
-    });
-
-    SpaceToDepthEndToEnd(defaultBackends,
-                         armnn::DataLayout::NCHW,
-                         inputTensorInfo,
-                         outputTensorInfo,
-                         inputData,
-                         expectedOutputData,
-                         blockSize);
-}
-
-void SpaceToDepthNHWCEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
-{
-    const unsigned int blockSize = 2;
-
-    armnn::TensorShape inputShape{1, 2, 2, 2};
-    armnn::TensorShape outputShape{1, 1, 1, 8};
-
-    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
-    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
-
-    std::vector<float> inputData = std::vector<float>(
-    {
-        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
-    });
-
-    std::vector<float> expectedOutputData = std::vector<float>(
-    {
-        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
-    });
-
-    SpaceToDepthEndToEnd(defaultBackends,
-                         armnn::DataLayout::NHWC,
-                         inputTensorInfo,
-                         outputTensorInfo,
-                         inputData,
-                         expectedOutputData,
-                         blockSize);
-}
-
-void SpaceToDepthNCHWEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
-{
-    const unsigned int blockSize = 2;
-
-    armnn::TensorShape inputShape{1, 2, 2, 2};
-    armnn::TensorShape outputShape{1, 1, 1, 8};
-
-    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
-    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
-
-
-    std::vector<float> inputData = std::vector<float>(
-    {
-        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
-    });
-
-    std::vector<float> expectedOutputData = std::vector<float>(
-    {
-        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
-    });
-
-    SpaceToDepthEndToEnd(defaultBackends,
-                         armnn::DataLayout::NCHW,
-                         inputTensorInfo,
-                         outputTensorInfo,
-                         inputData,
-                         expectedOutputData,
-                         blockSize);
-}
-
-} // anonymous namespace
+void SpaceToDepthNchwEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends);
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index 9588f56..e21a4b6 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -9,6 +9,7 @@
 
 #include <armnn/ArmNN.hpp>
 
+#include <backendsCommon/test/DataLayoutUtils.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -29,22 +30,10 @@
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
-    const armnn::PermutationVector permVector{0, 2, 3, 1};
-
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
     {
-        inputInfo  = armnnUtils::Permuted(inputInfo, permVector);
-        outputInfo = armnnUtils::Permuted(outputInfo, permVector);
-
-        constexpr size_t typeSize = sizeof(float);
-
-        std::vector<float> inputTmp(inputData.size());
-        armnnUtils::Permute(inputInfo.GetShape(), permVector, inputData.data(), inputTmp.data(), typeSize);
-        inputData = inputTmp;
-
-        std::vector<float> outputTmp(expectedOutputData.size());
-        armnnUtils::Permute(outputInfo.GetShape(), permVector, expectedOutputData.data(), outputTmp.data(), typeSize);
-        expectedOutputData = outputTmp;
+        PermuteTensorNhwcToNchw<float>(inputInfo, inputData);
+        PermuteTensorNhwcToNchw<float>(outputInfo, expectedOutputData);
     }
 
     if(armnn::IsQuantizedType<T>())
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
index 6191adf..7391f9c 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <backendsCommon/test/CommonTestUtils.hpp>
+#include <backendsCommon/test/DataLayoutUtils.hpp>
 #include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -194,27 +194,16 @@
 }
 
 template<typename T>
-void SwizzleData(const armnn::TensorInfo& inputInfo,
+void SwizzleData(armnn::TensorInfo& inputInfo,
                  std::vector<T>& inputData,
-                 const armnn::TensorInfo& outputInfo,
+                 armnn::TensorInfo& outputInfo,
                  std::vector<T>& outputData,
-                 const armnn::TensorInfo& weightsInfo,
+                 armnn::TensorInfo& weightsInfo,
                  std::vector<T>& weightsData)
 {
-    constexpr size_t dataTypeSize = sizeof(float);
-    const armnn::PermutationVector nchwToNhwc = { 0, 3, 1, 2 };
-
-    std::vector<T> tmp(inputData.size());
-    armnnUtils::Permute(inputInfo.GetShape(), nchwToNhwc, inputData.data(), tmp.data(), dataTypeSize);
-    inputData = tmp;
-
-    tmp.resize(weightsData.size());
-    armnnUtils::Permute(weightsInfo.GetShape(), nchwToNhwc, weightsData.data(), tmp.data(), dataTypeSize);
-    weightsData = tmp;
-
-    tmp.resize(outputData.size());
-    armnnUtils::Permute(outputInfo.GetShape(), nchwToNhwc, outputData.data(), tmp.data(), dataTypeSize);
-    outputData = tmp;
+    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
+    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
+    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
 }
 
 } // anonymous namespace
@@ -240,9 +229,9 @@
     constexpr unsigned int wWeights = 3u;
     constexpr unsigned int hWeights = wWeights;
 
-    TensorShape inputShape   = MakeTensorShape(batches, channels, hInput, wInput, layout);
-    TensorShape outputShape  = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
-    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
+    TensorShape inputShape   = { batches, channels, hInput,   wInput   };
+    TensorShape outputShape  = { batches, channels, hOutput,  wOutput  };
+    TensorShape weightsShape = { batches, channels, hWeights, wWeights };
 
     TensorInfo inputInfo(inputShape, ArmnnType);
     TensorInfo outputInfo(outputShape, ArmnnType);
@@ -327,9 +316,9 @@
     constexpr unsigned int wWeights = 3u;
     constexpr unsigned int hWeights = wWeights;
 
-    TensorShape inputShape   = MakeTensorShape(batches, channels, hInput, wInput, layout);
-    TensorShape outputShape  = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
-    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
+    TensorShape inputShape   = { batches, channels, hInput,   wInput   };
+    TensorShape outputShape  = { batches, channels, hOutput,  wOutput  };
+    TensorShape weightsShape = { batches, channels, hWeights, wWeights };
 
     TensorInfo inputInfo(inputShape, ArmnnType);
     TensorInfo outputInfo(outputShape, ArmnnType);
@@ -416,9 +405,9 @@
     constexpr unsigned int wWeights = 3u;
     constexpr unsigned int hWeights = wWeights;
 
-    TensorShape inputShape   = MakeTensorShape(batches, channels, hInput, wInput, layout);
-    TensorShape outputShape  = MakeTensorShape(batches, channels, hOutput, wOutput, layout);
-    TensorShape weightsShape = MakeTensorShape(batches, channels, hWeights, wWeights, layout);
+    TensorShape inputShape   = { batches, channels, hInput,   wInput   };
+    TensorShape outputShape  = { batches, channels, hOutput,  wOutput  };
+    TensorShape weightsShape = { batches, channels, hWeights, wWeights };
 
     TensorInfo inputInfo(inputShape, ArmnnType);
     TensorInfo outputInfo(outputShape, ArmnnType);
@@ -492,11 +481,11 @@
 {
     using namespace armnn;
 
-    TensorShape inputShape   = MakeTensorShape(1, 1, 2, 2, layout);
-    TensorShape outputShape  = MakeTensorShape(1, 2, 5, 5, layout);
+    TensorShape inputShape   = { 1, 1, 2, 2 };
+    TensorShape outputShape  = { 1, 2, 5, 5 };
 
     // OIHW for NCHW; OHWI for NHWC
-    TensorShape weightsShape = MakeTensorShape(2, 1, 3, 3, layout);
+    TensorShape weightsShape = { 2, 1, 3, 3 };
     TensorShape biasesShape  = { 2 };
 
     TensorInfo inputInfo(inputShape, ArmnnType);