IVGCVSW-2173 - Add end to end layer test implementation and example usage
 * Add CommonTestUtils
 * Add end to end layer test implementation
 * Add example usage for Merger layer on Ref, Cl, Neon

Change-Id: I8931136288cd68b80bcdad8f5ae087ae1a70a60a
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index e16116e..15a3937 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -5,9 +5,12 @@
 #pragma once
 
 #include <armnn/ArmNN.hpp>
+#include <armnn/INetwork.hpp>
 
 #include <backendsCommon/test/QuantizeHelper.hpp>
 
+#include <boost/test/unit_test.hpp>
+
 #include <vector>
 
 namespace
@@ -99,4 +102,52 @@
     );
 }
 
+// Generic end-to-end harness: loads the given network onto the requested
+// backends, feeds it inputTensorData, runs one inference and checks every
+// output against expectedOutputData. T is the (already quantized) data type.
+template<typename T>
+void EndToEndLayerTestImpl(INetworkPtr network,
+                           const std::map<int, std::vector<T>>& inputTensorData,
+                           const std::map<int, std::vector<T>>& expectedOutputData,
+                           std::vector<BackendId> backends)
+{
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // optimize the network
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime, failing the test early if the backend
+    // rejects the network (otherwise EnqueueWorkload would run on a bad id).
+    NetworkId netId;
+    Status loadingStatus = runtime->LoadNetwork(netId, std::move(optNet));
+    BOOST_TEST(loadingStatus == Status::Success);
+
+    InputTensors inputTensors;
+    inputTensors.reserve(inputTensorData.size());
+    for (auto&& it : inputTensorData)
+    {
+        inputTensors.push_back({it.first,
+                                ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
+    }
+    OutputTensors outputTensors;
+    outputTensors.reserve(expectedOutputData.size());
+    std::map<int, std::vector<T>> outputStorage;
+    for (auto&& it : expectedOutputData)
+    {
+        // Construct the output buffer in place (no temporary copy); the
+        // Tensor below aliases this storage, so it must outlive the inference.
+        outputStorage.emplace(it.first, std::vector<T>(it.second.size()));
+        outputTensors.push_back({it.first,
+                                 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
+                                        outputStorage.at(it.first).data())});
+    }
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results (bind by reference — no need to copy each vector).
+    for (auto&& it : expectedOutputData)
+    {
+        const std::vector<T>& out = outputStorage.at(it.first);
+        BOOST_TEST(it.second == out);
+    }
+}
+
 } // anonymous namespace
\ No newline at end of file