IVGCVSW-2173 - Add end to end layer test implementation and example usage
 * Add CommonTestUtils
 * Add end to end layer test implementation
 * Add example usage for Merger layer on Ref, Cl, Neon

Change-Id: I8931136288cd68b80bcdad8f5ae087ae1a70a60a
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 7419c14..962c6a5 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -9,6 +9,7 @@
     BackendIdTests.cpp
     BackendRegistryTests.cpp
     BatchNormTestImpl.hpp
+    CommonTestUtils.hpp
     Conv2dTestImpl.hpp
     ConvertFp16ToFp32TestImpl.hpp
     ConvertFp32ToFp16TestImpl.hpp
@@ -21,6 +22,7 @@
     LayerTests.hpp
     LstmTestImpl.hpp
+    MergerTestImpl.hpp
     NormTestImpl.hpp
     OptimizedNetworkTests.cpp
     PermuteTestImpl.hpp
     Pooling2dTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp
new file mode 100644
index 0000000..68180fb
--- /dev/null
+++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <Graph.hpp>
+
+using namespace armnn;
+
+namespace
+{
+
+// Connects two layers.
+void Connect(IConnectableLayer* from, IConnectableLayer* to, const TensorInfo& tensorInfo,
+             unsigned int fromIndex = 0, unsigned int toIndex = 0)
+{
+    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
+    from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index e16116e..15a3937 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -5,9 +5,12 @@
 #pragma once
 
 #include <armnn/ArmNN.hpp>
+#include <armnn/INetwork.hpp>
 
 #include <backendsCommon/test/QuantizeHelper.hpp>
 
+#include <boost/test/unit_test.hpp>
+
 #include <vector>
 
 namespace
@@ -99,4 +102,51 @@
     );
 }
 
+template<typename T>
+void EndToEndLayerTestImpl(INetworkPtr network,
+                           const std::map<int, std::vector<T>>& inputTensorData,
+                           const std::map<int, std::vector<T>>& expectedOutputData,
+                           std::vector<BackendId> backends)
+{
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // optimize the network
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    NetworkId netId;
+    runtime->LoadNetwork(netId, std::move(optNet));
+
+    InputTensors inputTensors;
+    inputTensors.reserve(inputTensorData.size());
+    for (auto&& it : inputTensorData)
+    {
+        inputTensors.push_back({it.first,
+                                ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
+    }
+    OutputTensors outputTensors;
+    outputTensors.reserve(expectedOutputData.size());
+    std::map<int, std::vector<T>> outputStorage;
+    for (auto&& it : expectedOutputData)
+    {
+        std::vector<T> out(it.second.size());
+        outputStorage.emplace(it.first, out);
+        outputTensors.push_back({it.first,
+                                 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
+                                               outputStorage.at(it.first).data())});
+    }
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results.
+    for (auto&& it : expectedOutputData)
+    {
+        const std::vector<T>& out = outputStorage.at(it.first);
+        BOOST_TEST(it.second == out);
+    }
+}
+
 } // anonymous namespace
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp
new file mode 100644
index 0000000..e0b8233
--- /dev/null
+++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp
@@ -0,0 +1,302 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/INetwork.hpp>
+
+#include <backendsCommon/test/CommonTestUtils.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <vector>
+
+namespace
+{
+
+template<typename armnn::DataType DataType>
+INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
+                                const TensorShape& outputShape,
+                                unsigned int concatAxis,
+                                const float qScale = 1.0f,
+                                const int32_t qOffset = 0)
+{
+    using namespace armnn;
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    OriginsDescriptor descriptor;
+
+    descriptor = CreateMergerDescriptorForConcatenation(inputShapes.begin(),
+                                                        inputShapes.end(),
+                                                        concatAxis);
+    IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger");
+
+    for (unsigned int i = 0; i < inputShapes.size(); ++i)
+    {
+        TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
+        IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
+        Connect(input, merger, inputTensorInfo, 0, i);
+    }
+
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+    Connect(merger, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
+template<typename T>
+void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 0;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 4, 3, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<typename T>
+void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 1;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 6, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<typename T>
+void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 2;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 3, 4, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        3, 4,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        9, 10,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<typename T>
+void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 3;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 3, 2, 4 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        1, 2,
+        3, 4,
+        3, 4,
+        5, 6,
+        5, 6,
+        7, 8,
+        7, 8,
+        9, 10,
+        9, 10,
+        11, 12,
+        11, 12,
+        1, 2,
+        1, 2,
+        3, 4,
+        3, 4,
+        5, 6,
+        5, 6,
+        7, 8,
+        7, 8,
+        9, 10,
+        9, 10,
+        11, 12,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index b374079..bf299dc 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -4,15 +4,47 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
 
 BOOST_AUTO_TEST_SUITE(ClEndToEnd)
 
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::GpuAcc};
+
 BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    ConstantUsageFloat32Test(backends);
+    ConstantUsageFloat32Test(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 3b7e309..3ca415a 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -4,15 +4,17 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
 
 BOOST_AUTO_TEST_SUITE(NeonEndToEnd)
 
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};
+
 BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    BOOST_TEST(ConstantUsageFloat32Test(backends));
+    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
 }
 
 BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
@@ -49,4 +51,34 @@
     BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index dc2ffb8..97bec51 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -4,21 +4,22 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
 
 BOOST_AUTO_TEST_SUITE(RefEndToEnd)
 
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};
+
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    BOOST_TEST(ConstantUsageFloat32Test(backends));
+    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
 }
 
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    BOOST_TEST(ConstantUsageUint8Test(backends));
+    BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
 }
 
 BOOST_AUTO_TEST_CASE(Unsigned8)
@@ -51,8 +52,7 @@
     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // optimize the network
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -115,8 +115,7 @@
     add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -214,8 +213,7 @@
     activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -248,4 +246,44 @@
     BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
 }
 
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
+{
+    MergerDim2EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
+{
+    MergerDim2EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file