IVGCVSW-2060: Separate and move backend-specific unit tests from the src/armnn/test folder to the backends

* Moved backend-specific memory leak checking tests from RuntimeTests.cpp to
  the corresponding backend test folders (the shared pattern is sketched below)
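
  For reference, each of the moved leak-checking tests follows the same shape.
  The snippet below is an illustrative sketch only, not a verbatim copy of any
  single test: the backend id is an arbitrary example, and it relies on the
  helpers introduced in this change (CreateAndDropDummyNetwork from
  RuntimeTestImpl.hpp, RuntimeLoadedNetworksReserve from RuntimeTests.hpp, and
  the ARMNN_* leak-checking macros):

      armnn::IRuntime::CreationOptions options;
      armnn::Runtime runtime(options);
      armnn::RuntimeLoadedNetworksReserve(&runtime);

      std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; // example backend
      CreateAndDropDummyNetwork(backends, runtime); // warm-up so one-time init is not counted as a leak

      {
          ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetwork");
          CreateAndDropDummyNetwork(backends, runtime); // the second run must leave nothing behind
          BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
          BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
          BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
      }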

Change-Id: I0a7f4ef52c5350c3cebca23b2b4e61a9446ca11f
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index c549c01..1f89f3b 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -47,5 +47,6 @@
         test/ClLayerSupportTests.cpp \
         test/ClLayerTests.cpp \
         test/ClMemCopyTests.cpp \
+        test/ClRuntimeTests.cpp \
         test/Fp16SupportTest.cpp \
         test/OpenClTimerTest.cpp
diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt
index 262e23a..69aa08d 100644
--- a/src/backends/cl/test/CMakeLists.txt
+++ b/src/backends/cl/test/CMakeLists.txt
@@ -9,6 +9,7 @@
     ClLayerSupportTests.cpp
     ClLayerTests.cpp
     ClMemCopyTests.cpp
+    ClRuntimeTests.cpp
     OpenClTimerTest.cpp
 )
 
diff --git a/src/backends/cl/test/ClRuntimeTests.cpp b/src/backends/cl/test/ClRuntimeTests.cpp
new file mode 100644
index 0000000..d29cd5b
--- /dev/null
+++ b/src/backends/cl/test/ClRuntimeTests.cpp
@@ -0,0 +1,151 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/test/RuntimeTests.hpp>
+
+#include <armnnUtils/LeakChecking.hpp>
+
+#include <backends/test/RuntimeTestImpl.hpp>
+
+#include <boost/core/ignore_unused.hpp>
+#include <boost/test/unit_test.hpp>
+
+#ifdef WITH_VALGRIND
+#include <valgrind/memcheck.h>
+#endif
+
+BOOST_AUTO_TEST_SUITE(ClRuntime)
+
+BOOST_AUTO_TEST_CASE(RuntimeValidateGpuDeviceSupportLayerNoFallback)
+{
+    // build up the structure of the network
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input = net->AddInputLayer(0);
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+    BOOST_CHECK(optNet);
+
+    // Load it into the runtime. It should succeed.
+    armnn::NetworkId netId;
+    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+}
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
+{
+    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+    armnn::IRuntime::CreationOptions options;
+    armnn::Runtime runtime(options);
+    armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    {
+        // Do a warmup run so we make sure that all one-time
+        // initialization happens before we do the leak checking.
+        CreateAndDropDummyNetwork(backends, runtime);
+    }
+
+    {
+        ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
+        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        // In the second run we check for all remaining memory
+        // in use after the network was unloaded. If there is any
+        // then it will be treated as a memory leak.
+        CreateAndDropDummyNetwork(backends, runtime);
+        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+    }
+}
+#endif
+
+// Note: this part of the code is due to be removed when we fully trust the gperftools-based results.
+#if defined(WITH_VALGRIND)
+BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
+{
+    // From documentation:
+
+    // This means that no pointer to the block can be found. The block is classified as "lost",
+    // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
+    unsigned long leakedBefore = 0;
+    unsigned long leakedAfter = 0;
+
+    // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
+    // the programmer could, at least in principle, have freed it before program exit.
+    // We want to test this in case memory is not freed as early as it could have been.
+    unsigned long reachableBefore = 0;
+    unsigned long reachableAfter = 0;
+
+    // Needed as out params but we don't test them.
+    unsigned long dubious = 0;
+    unsigned long suppressed = 0;
+
+    // Ensure that the runtime has reserved its internal storage before checking for memory leaks.
+    // Otherwise, loading the network will trigger allocations that won't be released
+    // until destruction.
+    armnn::NetworkId networkIdentifier;
+    armnn::IRuntime::CreationOptions options;
+    armnn::Runtime runtime(options);
+    armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+    // Check for leaks before we load the network, and record the counts so that we can see the delta after unloading.
+    VALGRIND_DO_QUICK_LEAK_CHECK;
+    VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);
+
+    // build a mock-network and load it into the runtime
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    {
+        armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+        armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+
+        armnn::INetworkPtr mockNetwork(armnn::INetwork::Create());
+
+        armnn::IConnectableLayer* input = mockNetwork->AddInputLayer(0, "input");
+        armnn::IConnectableLayer* layer = mockNetwork->AddActivationLayer(armnn::ActivationDescriptor(), "test");
+        armnn::IConnectableLayer* output = mockNetwork->AddOutputLayer(0, "output");
+
+        input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+        layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+        // Sets the tensors in the network.
+        input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+        // optimize the network
+        armnn::IOptimizedNetworkPtr optNet = Optimize(*mockNetwork, backends, runtime.GetDeviceSpec());
+
+        runtime.LoadNetwork(networkIdentifier, std::move(optNet));
+    }
+
+    runtime.UnloadNetwork(networkIdentifier);
+
+    VALGRIND_DO_ADDED_LEAK_CHECK;
+    VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
+
+    // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
+    BOOST_TEST(leakedBefore == leakedAfter);
+
+    // Allow a reasonable threshold on the reachable memory delta measured before and after running
+    // valgrind with the ACL clear cache function.
+    // TODO: Threshold is set to 80k until the root cause of the memory leakage is found and fixed; revert it to 1024 when fixed.
+    BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
+
+    // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to its parameters,
+    // so dubious and suppressed are written to but otherwise unused, which would cause a warning.
+    boost::ignore_unused(dubious);
+    boost::ignore_unused(suppressed);
+}
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 4cab9fb..a4e6db9 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -44,4 +44,5 @@
         test/NeonLayerSupportTests.cpp \
         test/NeonLayerTests.cpp \
         test/NeonMemCopyTests.cpp \
+        test/NeonRuntimeTests.cpp \
         test/NeonTimerTest.cpp
diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt
index 384a5e1..e6a2859 100644
--- a/src/backends/neon/test/CMakeLists.txt
+++ b/src/backends/neon/test/CMakeLists.txt
@@ -8,6 +8,7 @@
     NeonLayerSupportTests.cpp
     NeonLayerTests.cpp
     NeonMemCopyTests.cpp
+    NeonRuntimeTests.cpp
     NeonTimerTest.cpp
 )
 
diff --git a/src/backends/neon/test/NeonRuntimeTests.cpp b/src/backends/neon/test/NeonRuntimeTests.cpp
new file mode 100644
index 0000000..6e6b1e9
--- /dev/null
+++ b/src/backends/neon/test/NeonRuntimeTests.cpp
@@ -0,0 +1,68 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/test/RuntimeTests.hpp>
+
+#include <armnnUtils/LeakChecking.hpp>
+
+#include <backends/test/RuntimeTestImpl.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(NeonRuntime)
+
+BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
+{
+    // build up the structure of the network
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input = net->AddInputLayer(0);
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+    BOOST_CHECK(optNet);
+
+    // Load it into the runtime. It should succeed.
+    armnn::NetworkId netId;
+    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+}
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
+{
+    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+    armnn::IRuntime::CreationOptions options;
+    armnn::Runtime runtime(options);
+    armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    {
+        // Do a warmup run so we make sure that all one-time
+        // initialization happens before we do the leak checking.
+        CreateAndDropDummyNetwork(backends, runtime);
+    }
+
+    {
+        ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuAcc");
+        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        // In the second run we check for all remaining memory
+        // in use after the network was unloaded. If there is any
+        // then it will be treated as a memory leak.
+        CreateAndDropDummyNetwork(backends, runtime);
+        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+    }
+}
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 9ecb6d7..455ab46 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -66,4 +66,5 @@
 BACKEND_TEST_SOURCES := \
         test/RefCreateWorkloadTests.cpp \
         test/RefLayerSupportTests.cpp \
-        test/RefLayerTests.cpp
+        test/RefLayerTests.cpp \
+        test/RefRuntimeTests.cpp
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index deee364..dea0ef6 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -7,6 +7,7 @@
     RefCreateWorkloadTests.cpp
     RefLayerSupportTests.cpp
     RefLayerTests.cpp
+    RefRuntimeTests.cpp
 )
 
 add_library(armnnRefBackendUnitTests OBJECT ${armnnRefBackendUnitTests_sources})
diff --git a/src/backends/reference/test/RefRuntimeTests.cpp b/src/backends/reference/test/RefRuntimeTests.cpp
new file mode 100644
index 0000000..2536627
--- /dev/null
+++ b/src/backends/reference/test/RefRuntimeTests.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/test/RuntimeTests.hpp>
+
+#include <armnnUtils/LeakChecking.hpp>
+
+#include <backends/test/RuntimeTestImpl.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(RefRuntime)
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef)
+{
+    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::Runtime runtime(options);
+    armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    {
+        // Do a warmup run so we make sure that all one-time
+        // initialization happens before we do the leak checking.
+        CreateAndDropDummyNetwork(backends, runtime);
+    }
+
+    {
+        ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuRef");
+        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        // In the second run we check for all remaining memory
+        // in use after the network was unloaded. If there is any
+        // then it will be treated as a memory leak.
+        CreateAndDropDummyNetwork(backends, runtime);
+        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+    }
+}
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/test/RuntimeTestImpl.hpp b/src/backends/test/RuntimeTestImpl.hpp
new file mode 100644
index 0000000..671f94b
--- /dev/null
+++ b/src/backends/test/RuntimeTestImpl.hpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Runtime.hpp>
+
+namespace
+{
+
+inline void CreateAndDropDummyNetwork(const std::vector<armnn::BackendId>& backends, armnn::Runtime& runtime)
+{
+    armnn::NetworkId networkIdentifier;
+    {
+        armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+        armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+
+        armnn::INetworkPtr network(armnn::INetwork::Create());
+
+        armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
+        armnn::IConnectableLayer* layer = network->AddActivationLayer(armnn::ActivationDescriptor(), "test");
+        armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+        input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+        layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+        // Sets the tensors in the network.
+        input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+        // optimize the network
+        armnn::IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime.GetDeviceSpec());
+
+        runtime.LoadNetwork(networkIdentifier, std::move(optNet));
+    }
+
+    runtime.UnloadNetwork(networkIdentifier);
+}
+
+} // anonymous namespace