IVGCVSW-6772 Eliminate armnn/src/backends/backendsCommon/test/MockBackend.hpp

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ie99fe9786eb5e30585f437d0c6362c73688148db
diff --git a/include/armnnTestUtils/MockBackend.hpp b/include/armnnTestUtils/MockBackend.hpp
index 8bc41b3..425062a 100644
--- a/include/armnnTestUtils/MockBackend.hpp
+++ b/include/armnnTestUtils/MockBackend.hpp
@@ -4,9 +4,12 @@
 //
 #pragma once
 
+#include <atomic>
+
 #include <armnn/backends/IBackendInternal.hpp>
 #include <armnn/backends/MemCopyWorkload.hpp>
 #include <armnnTestUtils/MockTensorHandle.hpp>
+#include <backendsCommon/LayerSupportBase.hpp>
 
 namespace armnn
 {
@@ -26,16 +29,20 @@
         return GetIdStatic();
     }
     IBackendInternal::IWorkloadFactoryPtr
-        CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr) const override
-    {
-        IgnoreUnused(memoryManager);
-        return nullptr;
-    }
+        CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr) const override;
 
-    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
-    {
-        return nullptr;
-    };
+    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+
+    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override;
+
+    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override;
+    IBackendInternal::IBackendProfilingContextPtr
+    CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
+                                  IBackendProfilingPtr& backendProfiling) override;
+
+    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+
+    std::unique_ptr<ICustomAllocator> GetDefaultAllocator() const override;
 };
 
 class MockWorkloadFactory : public IWorkloadFactory
@@ -112,4 +119,208 @@
     mutable std::shared_ptr<MockMemoryManager> m_MemoryManager;
 };
 
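+// RAII helper that registers the MockBackend with the BackendRegistry on construction
+// and deregisters it on destruction.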
+class MockBackendInitialiser
+{
+public:
+    MockBackendInitialiser();
+    ~MockBackendInitialiser();
+};
+
+class MockBackendProfilingContext : public arm::pipe::IBackendProfilingContext
+{
+public:
+    MockBackendProfilingContext(IBackendInternal::IBackendProfilingPtr& backendProfiling)
+        : m_BackendProfiling(std::move(backendProfiling))
+        , m_CapturePeriod(0)
+        , m_IsTimelineEnabled(true)
+    {}
+
+    ~MockBackendProfilingContext() = default;
+
+    IBackendInternal::IBackendProfilingPtr& GetBackendProfiling()
+    {
+        return m_BackendProfiling;
+    }
+
+    uint16_t RegisterCounters(uint16_t currentMaxGlobalCounterId)
+    {
+        std::unique_ptr<arm::pipe::IRegisterBackendCounters> counterRegistrar =
+            m_BackendProfiling->GetCounterRegistrationInterface(static_cast<uint16_t>(currentMaxGlobalCounterId));
+
+        std::string categoryName("MockCounters");
+        counterRegistrar->RegisterCategory(categoryName);
+
+        counterRegistrar->RegisterCounter(0, categoryName, 0, 0, 1.f, "Mock Counter One", "Some notional counter");
+
+        counterRegistrar->RegisterCounter(1, categoryName, 0, 0, 1.f, "Mock Counter Two",
+                                          "Another notional counter");
+
+        std::string units("microseconds");
+        uint16_t nextMaxGlobalCounterId =
+                        counterRegistrar->RegisterCounter(2, categoryName, 0, 0, 1.f, "Mock MultiCore Counter",
+                                                          "A dummy four core counter", units, 4);
+        return nextMaxGlobalCounterId;
+    }
+
+    Optional<std::string> ActivateCounters(uint32_t capturePeriod, const std::vector<uint16_t>& counterIds)
+    {
+        if (capturePeriod == 0 || counterIds.size() == 0)
+        {
+            m_ActiveCounters.clear();
+        }
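+        // 15939 acts as a sentinel capture period that exercises the error-return path.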
+        else if (capturePeriod == 15939u)
+        {
+            return armnn::Optional<std::string>("ActivateCounters example test error");
+        }
+        m_CapturePeriod  = capturePeriod;
+        m_ActiveCounters = counterIds;
+        return armnn::Optional<std::string>();
+    }
+
+    std::vector<arm::pipe::Timestamp> ReportCounterValues()
+    {
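+        // Produce one synthetic timestamp: every active counter reports the value counterId + 1,
+        // stamped with the currently configured capture period.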
+        std::vector<arm::pipe::CounterValue> counterValues;
+
+        for (auto counterId : m_ActiveCounters)
+        {
+            counterValues.emplace_back(arm::pipe::CounterValue{ counterId, counterId + 1u });
+        }
+
+        uint64_t timestamp = m_CapturePeriod;
+        return { arm::pipe::Timestamp{ timestamp, counterValues } };
+    }
+
+    bool EnableProfiling(bool)
+    {
+        auto sendTimelinePacket = m_BackendProfiling->GetSendTimelinePacket();
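+        // Emits a single timeline entity packet with a fixed test GUID (4256).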
+        sendTimelinePacket->SendTimelineEntityBinaryPacket(4256);
+        sendTimelinePacket->Commit();
+        return true;
+    }
+
+    bool EnableTimelineReporting(bool isEnabled)
+    {
+        m_IsTimelineEnabled = isEnabled;
+        return isEnabled;
+    }
+
+    bool TimelineReportingEnabled()
+    {
+        return m_IsTimelineEnabled;
+    }
+
+private:
+    IBackendInternal::IBackendProfilingPtr m_BackendProfiling;
+    uint32_t m_CapturePeriod;
+    std::vector<uint16_t> m_ActiveCounters;
+    std::atomic<bool> m_IsTimelineEnabled;
+};
+
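+// Singleton that holds the profiling context created by MockBackend::CreateBackendProfilingContext,
+// allowing tests to retrieve it afterwards.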
+class MockBackendProfilingService
+{
+public:
+    // Getter for the singleton instance
+    static MockBackendProfilingService& Instance()
+    {
+        static MockBackendProfilingService instance;
+        return instance;
+    }
+
+    MockBackendProfilingContext* GetContext()
+    {
+        return m_sharedContext.get();
+    }
+
+    void SetProfilingContextPtr(std::shared_ptr<MockBackendProfilingContext> shared)
+    {
+        m_sharedContext = shared;
+    }
+
+private:
+    std::shared_ptr<MockBackendProfilingContext> m_sharedContext;
+};
+
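+// Minimal layer support: only Input, Output, Addition and Convolution2d layers are reported as supported.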
+class MockLayerSupport : public LayerSupportBase
+{
+public:
+    bool IsLayerSupported(const LayerType& type,
+                          const std::vector<TensorInfo>& infos,
+                          const BaseDescriptor& descriptor,
+                          const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
+                          const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
+                          Optional<std::string&> reasonIfUnsupported) const override
+    {
+        switch(type)
+        {
+            case LayerType::Input:
+                return IsInputSupported(infos[0], reasonIfUnsupported);
+            case LayerType::Output:
+                return IsOutputSupported(infos[0], reasonIfUnsupported);
+            case LayerType::Addition:
+                return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+            case LayerType::Convolution2d:
+            {
+                if (infos.size() != 4)
+                {
+                    throw InvalidArgumentException("Invalid number of Convolution2d "
+                                                   "TensorInfos. TensorInfos should be of format: "
+                                                   "{input, output, weights, biases}.");
+                }
+
+                auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+                if (infos[3] == TensorInfo())
+                {
+                    return IsConvolution2dSupported(infos[0],
+                                                    infos[1],
+                                                    desc,
+                                                    infos[2],
+                                                    EmptyOptional(),
+                                                    reasonIfUnsupported);
+                }
+                else
+                {
+                    return IsConvolution2dSupported(infos[0],
+                                                    infos[1],
+                                                    desc,
+                                                    infos[2],
+                                                    infos[3],
+                                                    reasonIfUnsupported);
+                }
+            }
+            default:
+                return false;
+        }
+    }
+
+    bool IsInputSupported(const TensorInfo& /*input*/,
+                          Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
+    {
+        return true;
+    }
+
+    bool IsOutputSupported(const TensorInfo& /*output*/,
+                           Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
+    {
+        return true;
+    }
+
+    bool IsAdditionSupported(const TensorInfo& /*input0*/,
+                             const TensorInfo& /*input1*/,
+                             const TensorInfo& /*output*/,
+                             Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
+    {
+        return true;
+    }
+
+    bool IsConvolution2dSupported(const TensorInfo& /*input*/,
+                                  const TensorInfo& /*output*/,
+                                  const Convolution2dDescriptor& /*descriptor*/,
+                                  const TensorInfo& /*weights*/,
+                                  const Optional<TensorInfo>& /*biases*/,
+                                  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
+    {
+        return true;
+    }
+};
+
 }    // namespace armnn
diff --git a/src/armnnTestUtils/MockBackend.cpp b/src/armnnTestUtils/MockBackend.cpp
index 29996bf..ac7f7c7 100644
--- a/src/armnnTestUtils/MockBackend.cpp
+++ b/src/armnnTestUtils/MockBackend.cpp
@@ -3,18 +3,17 @@
 // SPDX-License-Identifier: MIT
 //
 
+#include <armnn/BackendRegistry.hpp>
 #include <armnn/backends/MemCopyWorkload.hpp>
 #include <armnnTestUtils/MockBackend.hpp>
 #include <armnnTestUtils/MockTensorHandle.hpp>
+#include <backendsCommon/DefaultAllocator.hpp>
+#include <backendsCommon/test/MockBackendId.hpp>
+#include <SubgraphViewSelector.hpp>
 
 namespace armnn
 {
 
-constexpr const char* MockBackendId()
-{
-    return "CpuMock";
-}
-
 const BackendId& MockBackend::GetIdStatic()
 {
     static const BackendId s_Id{MockBackendId()};
@@ -58,4 +57,246 @@
     }
 }
 
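+// Helper functions used by MockBackend::OptimizeSubgraphView to classify layers.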
+bool IsLayerSupported(const armnn::Layer* layer)
+{
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::LayerType layerType = layer->GetType();
+    switch (layerType)
+    {
+        case armnn::LayerType::Input:
+        case armnn::LayerType::Output:
+        case armnn::LayerType::Addition:
+        case armnn::LayerType::Convolution2d:
+            // Layer supported
+            return true;
+        default:
+            // Layer unsupported
+            return false;
+    }
+}
+
+bool IsLayerSupported(const armnn::Layer& layer)
+{
+    return IsLayerSupported(&layer);
+}
+
+bool IsLayerOptimizable(const armnn::Layer* layer)
+{
+    ARMNN_ASSERT(layer != nullptr);
+
+    // A Layer is not optimizable if its name contains "unoptimizable"
+    const std::string layerName(layer->GetName());
+    bool optimizable = layerName.find("unoptimizable") == std::string::npos;
+
+    return optimizable;
+}
+
+bool IsLayerOptimizable(const armnn::Layer& layer)
+{
+    return IsLayerOptimizable(&layer);
+}
+
+} // namespace armnn
+
+namespace armnn
+{
+
+MockBackendInitialiser::MockBackendInitialiser()
+{
+    BackendRegistryInstance().Register(MockBackend::GetIdStatic(),
+                                       []()
+                                       {
+                                           return IBackendInternalUniquePtr(new MockBackend);
+                                       });
+}
+
+MockBackendInitialiser::~MockBackendInitialiser()
+{
+    try
+    {
+        BackendRegistryInstance().Deregister(MockBackend::GetIdStatic());
+    }
+    catch (...)
+    {
+        std::cerr << "could not deregister mock backend" << std::endl;
+    }
+}
+
+IBackendInternal::IWorkloadFactoryPtr MockBackend::CreateWorkloadFactory(
+    const IBackendInternal::IMemoryManagerSharedPtr& /*memoryManager*/) const
+{
+    return IWorkloadFactoryPtr{};
+}
+
+IBackendInternal::IBackendContextPtr MockBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
+{
+    return IBackendContextPtr{};
+}
+
+IBackendInternal::IBackendProfilingContextPtr MockBackend::CreateBackendProfilingContext(
+    const IRuntime::CreationOptions& options, IBackendProfilingPtr& backendProfiling)
+{
+    IgnoreUnused(options);
+    std::shared_ptr<armnn::MockBackendProfilingContext> context =
+        std::make_shared<MockBackendProfilingContext>(backendProfiling);
+    MockBackendProfilingService::Instance().SetProfilingContextPtr(context);
+    return context;
+}
+
+IBackendInternal::IMemoryManagerUniquePtr MockBackend::CreateMemoryManager() const
+{
+    return IMemoryManagerUniquePtr{};
+}
+
+IBackendInternal::ILayerSupportSharedPtr MockBackend::GetLayerSupport() const
+{
+    static ILayerSupportSharedPtr layerSupport{new MockLayerSupport};
+    return layerSupport;
+}
+
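+// Splits the given sub-graph into supported sub-graphs (substituted with pre-compiled layers),
+// unsupported sub-graphs (reported as failed) and untouched sub-graphs (supported but not optimizable).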
+OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
+{
+    // Prepare the optimization views
+    OptimizationViews optimizationViews;
+
+    // Get the layers of the input sub-graph
+    const SubgraphView::IConnectableLayers& subgraphLayers = subgraph.GetIConnectableLayers();
+
+    // Parse the layers
+    SubgraphView::IConnectableLayers supportedLayers;
+    SubgraphView::IConnectableLayers unsupportedLayers;
+    SubgraphView::IConnectableLayers untouchedLayers;
+    std::for_each(subgraphLayers.begin(),
+                  subgraphLayers.end(),
+                  [&](IConnectableLayer* layer)
+                  {
+                      bool supported = IsLayerSupported(PolymorphicDowncast<Layer*>(layer));
+                      if (supported)
+                      {
+                          // Layer supported, check if it's optimizable
+                          bool optimizable = IsLayerOptimizable(PolymorphicDowncast<Layer*>(layer));
+                          if (optimizable)
+                          {
+                              // Layer fully supported
+                              supportedLayers.push_back(layer);
+                          }
+                          else
+                          {
+                              // Layer supported but not optimizable
+                              untouchedLayers.push_back(layer);
+                          }
+                      }
+                      else
+                      {
+                          // Layer unsupported
+                          unsupportedLayers.push_back(layer);
+                      }
+                  });
+
+    // Check if there are supported layers
+    if (!supportedLayers.empty())
+    {
+        // Select the layers that are neither inputs nor outputs, but that are optimizable
+        auto supportedSubgraphSelector = [](const Layer& layer)
+        {
+            return layer.GetType() != LayerType::Input &&
+                layer.GetType() != LayerType::Output &&
+                IsLayerSupported(layer) &&
+                IsLayerOptimizable(layer);
+        };
+
+        // Apply the subgraph selector to the supported layers to group them into sub-graphs where appropriate
+        SubgraphView mutableSubgraph(subgraph);
+        SubgraphViewSelector::Subgraphs supportedSubgraphs =
+                         SubgraphViewSelector::SelectSubgraphs(mutableSubgraph, supportedSubgraphSelector);
+
+        // Create a substitution pair for each supported sub-graph
+        std::for_each(supportedSubgraphs.begin(),
+                      supportedSubgraphs.end(),
+                      [&optimizationViews](const SubgraphView::SubgraphViewPtr& supportedSubgraph)
+                      {
+                          ARMNN_ASSERT(supportedSubgraph != nullptr);
+
+                          CompiledBlobPtr blobPtr;
+                          BackendId backend = MockBackendId();
+
+                          IConnectableLayer* preCompiledLayer =
+                                               optimizationViews.GetINetwork()->AddPrecompiledLayer(
+                                                   PreCompiledDescriptor(supportedSubgraph->GetNumInputSlots(),
+                                                                         supportedSubgraph->GetNumOutputSlots()),
+                                                   std::move(blobPtr),
+                                                   backend,
+                                                   nullptr);
+
+                          SubgraphView substitutionSubgraph(*supportedSubgraph);
+                          SubgraphView replacementSubgraph(preCompiledLayer);
+
+                          optimizationViews.AddSubstitution({ substitutionSubgraph, replacementSubgraph });
+                      });
+    }
+
+    // Check if there are unsupported layers
+    if (!unsupportedLayers.empty())
+    {
+        // Select the layers that are neither inputs nor outputs, and that are not supported
+        auto unsupportedSubgraphSelector = [](const Layer& layer)
+        {
+            return layer.GetType() != LayerType::Input &&
+                layer.GetType() != LayerType::Output &&
+                !IsLayerSupported(layer);
+        };
+
+        // Apply the subgraph selector to the unsupported layers to group them into sub-graphs where appropriate
+        SubgraphView mutableSubgraph(subgraph);
+        SubgraphViewSelector::Subgraphs unsupportedSubgraphs =
+                         SubgraphViewSelector::SelectSubgraphs(mutableSubgraph, unsupportedSubgraphSelector);
+
+        // Add each unsupported sub-graph to the list of failed sub-graphs in the optimization views
+        std::for_each(unsupportedSubgraphs.begin(),
+                      unsupportedSubgraphs.end(),
+                      [&optimizationViews](const SubgraphView::SubgraphViewPtr& unsupportedSubgraph)
+                      {
+                          ARMNN_ASSERT(unsupportedSubgraph != nullptr);
+
+                          optimizationViews.AddFailedSubgraph(SubgraphView(*unsupportedSubgraph));
+                      });
+    }
+
+    // Check if there are untouched layers
+    if (!untouchedLayers.empty())
+    {
+        // Select the layers that are neither inputs nor outputs, that are supported but not optimizable
+        auto untouchedSubgraphSelector = [](const Layer& layer)
+        {
+            return layer.GetType() != LayerType::Input &&
+                layer.GetType() != LayerType::Output &&
+                IsLayerSupported(layer) &&
+                !IsLayerOptimizable(layer);
+        };
+
+        // Apply the subgraph selector to the untouched layers to group them into sub-graphs where appropriate
+        SubgraphView mutableSubgraph(subgraph);
+        SubgraphViewSelector::Subgraphs untouchedSubgraphs =
+                         SubgraphViewSelector::SelectSubgraphs(mutableSubgraph, untouchedSubgraphSelector);
+
+        // Add each untouched sub-graph to the list of untouched sub-graphs in the optimization views
+        std::for_each(untouchedSubgraphs.begin(),
+                      untouchedSubgraphs.end(),
+                      [&optimizationViews](const SubgraphView::SubgraphViewPtr& untouchedSubgraph)
+                      {
+                          ARMNN_ASSERT(untouchedSubgraph != nullptr);
+
+                          optimizationViews.AddUntouchedSubgraph(SubgraphView(*untouchedSubgraph));
+                      });
+    }
+
+    return optimizationViews;
+}
+
+std::unique_ptr<ICustomAllocator> MockBackend::GetDefaultAllocator() const
+{
+    return std::make_unique<DefaultAllocator>();
+}
+
 }    // namespace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/BackendProfilingTests.cpp b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
index c0b4e0a..f1ec46b 100644
--- a/src/backends/backendsCommon/test/BackendProfilingTests.cpp
+++ b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
@@ -7,7 +7,6 @@
 #include "CounterDirectory.hpp"
 #include "CounterIdMap.hpp"
 #include "Holder.hpp"
-#include "MockBackend.hpp"
 #include "MockBackendId.hpp"
 #include "PeriodicCounterCapture.hpp"
 #include "PeriodicCounterSelectionCommandHandler.hpp"
@@ -23,6 +22,7 @@
 #include <armnn/Logging.hpp>
 #include <armnn/profiling/ISendTimelinePacket.hpp>
 #include <armnn/profiling/ProfilingOptions.hpp>
+#include <armnnTestUtils/MockBackend.hpp>
 
 #include <doctest/doctest.h>
 #include <vector>
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index bb85f7e..a668c51 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -40,8 +40,6 @@
     LogSoftmaxEndToEndTestImpl.cpp
     LogSoftmaxEndToEndTestImpl.hpp
     MemoryManagerTests.cpp
-    MockBackend.cpp
-    MockBackend.hpp
     MockBackendId.hpp
     OptimizeSubgraphViewTests.cpp
     OptimizationViewsTests.cpp
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
deleted file mode 100644
index 2ce14f9..0000000
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ /dev/null
@@ -1,271 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "MockBackend.hpp"
-#include "MockBackendId.hpp"
-
-#include <armnn/BackendRegistry.hpp>
-
-#include <armnn/backends/IBackendContext.hpp>
-#include <armnn/backends/IMemoryManager.hpp>
-#include <backendsCommon/DefaultAllocator.hpp>
-
-#include <Optimizer.hpp>
-#include <SubgraphViewSelector.hpp>
-
-#include <algorithm>
-
-namespace
-{
-
-bool IsLayerSupported(const armnn::Layer* layer)
-{
-    ARMNN_ASSERT(layer != nullptr);
-
-    armnn::LayerType layerType = layer->GetType();
-    switch (layerType)
-    {
-    case armnn::LayerType::Input:
-    case armnn::LayerType::Output:
-    case armnn::LayerType::Addition:
-    case armnn::LayerType::Convolution2d:
-        // Layer supported
-        return true;
-    default:
-        // Layer unsupported
-        return false;
-    }
-}
-
-bool IsLayerSupported(const armnn::Layer& layer)
-{
-    return IsLayerSupported(&layer);
-}
-
-bool IsLayerOptimizable(const armnn::Layer* layer)
-{
-    ARMNN_ASSERT(layer != nullptr);
-
-    // A Layer is not optimizable if its name contains "unoptimizable"
-    const std::string layerName(layer->GetName());
-    bool optimizable = layerName.find("unoptimizable") == std::string::npos;
-
-    return optimizable;
-}
-
-bool IsLayerOptimizable(const armnn::Layer& layer)
-{
-    return IsLayerOptimizable(&layer);
-}
-
-} // Anonymous namespace
-
-namespace armnn
-{
-
-MockBackendInitialiser::MockBackendInitialiser()
-{
-    BackendRegistryInstance().Register(MockBackend::GetIdStatic(),
-                                       []()
-                                       {
-                                           return IBackendInternalUniquePtr(new MockBackend);
-                                       });
-}
-
-MockBackendInitialiser::~MockBackendInitialiser()
-{
-    try
-    {
-        BackendRegistryInstance().Deregister(MockBackend::GetIdStatic());
-    }
-    catch (...)
-    {
-        std::cerr << "could not deregister mock backend" << std::endl;
-    }
-}
-
-const BackendId& MockBackend::GetIdStatic()
-{
-    static const BackendId s_Id{MockBackendId()};
-    return s_Id;
-}
-
-IBackendInternal::IWorkloadFactoryPtr MockBackend::CreateWorkloadFactory(
-    const IBackendInternal::IMemoryManagerSharedPtr& /*memoryManager*/) const
-{
-    return IWorkloadFactoryPtr{};
-}
-
-IBackendInternal::IBackendContextPtr MockBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
-{
-    return IBackendContextPtr{};
-}
-
-IBackendInternal::IBackendProfilingContextPtr MockBackend::CreateBackendProfilingContext(
-    const IRuntime::CreationOptions& options, IBackendProfilingPtr& backendProfiling)
-{
-    IgnoreUnused(options);
-    std::shared_ptr<armnn::MockBackendProfilingContext> context =
-        std::make_shared<MockBackendProfilingContext>(backendProfiling);
-    MockBackendProfilingService::Instance().SetProfilingContextPtr(context);
-    return context;
-}
-
-IBackendInternal::IMemoryManagerUniquePtr MockBackend::CreateMemoryManager() const
-{
-    return IMemoryManagerUniquePtr{};
-}
-
-IBackendInternal::ILayerSupportSharedPtr MockBackend::GetLayerSupport() const
-{
-    static ILayerSupportSharedPtr layerSupport{new MockLayerSupport};
-    return layerSupport;
-}
-
-OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
-{
-    // Prepare the optimization views
-    OptimizationViews optimizationViews;
-
-    // Get the layers of the input sub-graph
-    const SubgraphView::IConnectableLayers& subgraphLayers = subgraph.GetIConnectableLayers();
-
-    // Parse the layers
-    SubgraphView::IConnectableLayers supportedLayers;
-    SubgraphView::IConnectableLayers unsupportedLayers;
-    SubgraphView::IConnectableLayers untouchedLayers;
-    std::for_each(subgraphLayers.begin(),
-                  subgraphLayers.end(),
-                  [&](IConnectableLayer* layer)
-    {
-        bool supported = IsLayerSupported(PolymorphicDowncast<Layer*>(layer));
-        if (supported)
-        {
-            // Layer supported, check if it's optimizable
-            bool optimizable = IsLayerOptimizable(PolymorphicDowncast<Layer*>(layer));
-            if (optimizable)
-            {
-                // Layer fully supported
-                supportedLayers.push_back(layer);
-            }
-            else
-            {
-                // Layer supported but not optimizable
-                untouchedLayers.push_back(layer);
-            }
-        }
-        else
-        {
-            // Layer unsupported
-            unsupportedLayers.push_back(layer);
-        }
-    });
-
-    // Check if there are supported layers
-    if (!supportedLayers.empty())
-    {
-        // Select the layers that are neither inputs or outputs, but that are optimizable
-        auto supportedSubgraphSelector = [](const Layer& layer)
-        {
-            return layer.GetType() != LayerType::Input &&
-                   layer.GetType() != LayerType::Output &&
-                   IsLayerSupported(layer) &&
-                   IsLayerOptimizable(layer);
-        };
-
-        // Apply the subgraph selector to the supported layers to group them into sub-graphs were appropriate
-        SubgraphView mutableSubgraph(subgraph);
-        SubgraphViewSelector::Subgraphs supportedSubgraphs =
-                SubgraphViewSelector::SelectSubgraphs(mutableSubgraph, supportedSubgraphSelector);
-
-        // Create a substitution pair for each supported sub-graph
-        std::for_each(supportedSubgraphs.begin(),
-                      supportedSubgraphs.end(),
-                      [&optimizationViews](const SubgraphView::SubgraphViewPtr& supportedSubgraph)
-        {
-            ARMNN_ASSERT(supportedSubgraph != nullptr);
-
-            CompiledBlobPtr blobPtr;
-            BackendId backend = MockBackendId();
-
-            IConnectableLayer* preCompiledLayer =
-                optimizationViews.GetINetwork()->AddPrecompiledLayer(
-                        PreCompiledDescriptor(supportedSubgraph->GetNumInputSlots(),
-                                              supportedSubgraph->GetNumOutputSlots()),
-                                              std::move(blobPtr),
-                                              backend,
-                                              nullptr);
-
-            SubgraphView substitutionSubgraph(*supportedSubgraph);
-            SubgraphView replacementSubgraph(preCompiledLayer);
-
-            optimizationViews.AddSubstitution({ substitutionSubgraph, replacementSubgraph });
-        });
-    }
-
-    // Check if there are unsupported layers
-    if (!unsupportedLayers.empty())
-    {
-        // Select the layers that are neither inputs or outputs, and are not optimizable
-        auto unsupportedSubgraphSelector = [](const Layer& layer)
-        {
-            return layer.GetType() != LayerType::Input &&
-                   layer.GetType() != LayerType::Output &&
-                   !IsLayerSupported(layer);
-        };
-
-        // Apply the subgraph selector to the unsupported layers to group them into sub-graphs were appropriate
-        SubgraphView mutableSubgraph(subgraph);
-        SubgraphViewSelector::Subgraphs unsupportedSubgraphs =
-                SubgraphViewSelector::SelectSubgraphs(mutableSubgraph, unsupportedSubgraphSelector);
-
-        // Add each unsupported sub-graph to the list of failed sub-graphs in the optimizization views
-        std::for_each(unsupportedSubgraphs.begin(),
-                      unsupportedSubgraphs.end(),
-                      [&optimizationViews](const SubgraphView::SubgraphViewPtr& unsupportedSubgraph)
-        {
-            ARMNN_ASSERT(unsupportedSubgraph != nullptr);
-
-            optimizationViews.AddFailedSubgraph(SubgraphView(*unsupportedSubgraph));
-        });
-    }
-
-    // Check if there are untouched layers
-    if (!untouchedLayers.empty())
-    {
-        // Select the layers that are neither inputs or outputs, that are supported but that and are not optimizable
-        auto untouchedSubgraphSelector = [](const Layer& layer)
-        {
-            return layer.GetType() != LayerType::Input &&
-                   layer.GetType() != LayerType::Output &&
-                   IsLayerSupported(layer) &&
-                   !IsLayerOptimizable(layer);
-        };
-
-        // Apply the subgraph selector to the untouched layers to group them into sub-graphs were appropriate
-        SubgraphView mutableSubgraph(subgraph);
-        SubgraphViewSelector::Subgraphs untouchedSubgraphs =
-                SubgraphViewSelector::SelectSubgraphs(mutableSubgraph, untouchedSubgraphSelector);
-
-        // Add each untouched sub-graph to the list of untouched sub-graphs in the optimizization views
-        std::for_each(untouchedSubgraphs.begin(),
-                      untouchedSubgraphs.end(),
-                      [&optimizationViews](const SubgraphView::SubgraphViewPtr& untouchedSubgraph)
-        {
-            ARMNN_ASSERT(untouchedSubgraph != nullptr);
-
-            optimizationViews.AddUntouchedSubgraph(SubgraphView(*untouchedSubgraph));
-        });
-    }
-
-    return optimizationViews;
-}
-
-std::unique_ptr<ICustomAllocator> MockBackend::GetDefaultAllocator() const
-{
-    return std::make_unique<DefaultAllocator>();
-}
-
-} // namespace armnn
diff --git a/src/backends/backendsCommon/test/MockBackend.hpp b/src/backends/backendsCommon/test/MockBackend.hpp
deleted file mode 100644
index 9b7b2f3..0000000
--- a/src/backends/backendsCommon/test/MockBackend.hpp
+++ /dev/null
@@ -1,255 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "MockBackendId.hpp"
-#include "armnn/backends/profiling/IBackendProfiling.hpp"
-#include "armnn/backends/profiling/IBackendProfilingContext.hpp"
-
-#include <LayerSupportCommon.hpp>
-#include <armnn/backends/IBackendInternal.hpp>
-#include <armnn/backends/OptimizationViews.hpp>
-#include <armnn/backends/profiling/IBackendProfiling.hpp>
-#include <backends/BackendProfiling.hpp>
-#include <backendsCommon/LayerSupportBase.hpp>
-
-namespace armnn
-{
-
-class MockBackendInitialiser
-{
-public:
-    MockBackendInitialiser();
-    ~MockBackendInitialiser();
-};
-
-class MockBackendProfilingContext : public arm::pipe::IBackendProfilingContext
-{
-public:
-    MockBackendProfilingContext(IBackendInternal::IBackendProfilingPtr& backendProfiling)
-        : m_BackendProfiling(std::move(backendProfiling))
-        , m_CapturePeriod(0)
-        , m_IsTimelineEnabled(true)
-    {}
-
-    ~MockBackendProfilingContext() = default;
-
-    IBackendInternal::IBackendProfilingPtr& GetBackendProfiling()
-    {
-        return m_BackendProfiling;
-    }
-
-    uint16_t RegisterCounters(uint16_t currentMaxGlobalCounterId)
-    {
-        std::unique_ptr<arm::pipe::IRegisterBackendCounters> counterRegistrar =
-            m_BackendProfiling->GetCounterRegistrationInterface(static_cast<uint16_t>(currentMaxGlobalCounterId));
-
-        std::string categoryName("MockCounters");
-        counterRegistrar->RegisterCategory(categoryName);
-
-        counterRegistrar->RegisterCounter(0, categoryName, 0, 0, 1.f, "Mock Counter One", "Some notional counter");
-
-        counterRegistrar->RegisterCounter(1, categoryName, 0, 0, 1.f, "Mock Counter Two",
-                                                                   "Another notional counter");
-
-        std::string units("microseconds");
-        uint16_t nextMaxGlobalCounterId =
-                counterRegistrar->RegisterCounter(2, categoryName, 0, 0, 1.f, "Mock MultiCore Counter",
-                                                                   "A dummy four core counter", units, 4);
-        return nextMaxGlobalCounterId;
-    }
-
-    Optional<std::string> ActivateCounters(uint32_t capturePeriod, const std::vector<uint16_t>& counterIds)
-    {
-        if (capturePeriod == 0 || counterIds.size() == 0)
-        {
-            m_ActiveCounters.clear();
-        }
-        else if (capturePeriod == 15939u)
-        {
-            return armnn::Optional<std::string>("ActivateCounters example test error");
-        }
-        m_CapturePeriod  = capturePeriod;
-        m_ActiveCounters = counterIds;
-        return armnn::Optional<std::string>();
-    }
-
-    std::vector<arm::pipe::Timestamp> ReportCounterValues()
-    {
-        std::vector<arm::pipe::CounterValue> counterValues;
-
-        for (auto counterId : m_ActiveCounters)
-        {
-            counterValues.emplace_back(arm::pipe::CounterValue{ counterId, counterId + 1u });
-        }
-
-        uint64_t timestamp = m_CapturePeriod;
-        return { arm::pipe::Timestamp{ timestamp, counterValues } };
-    }
-
-    bool EnableProfiling(bool)
-    {
-        auto sendTimelinePacket = m_BackendProfiling->GetSendTimelinePacket();
-        sendTimelinePacket->SendTimelineEntityBinaryPacket(4256);
-        sendTimelinePacket->Commit();
-        return true;
-    }
-
-    bool EnableTimelineReporting(bool isEnabled)
-    {
-        m_IsTimelineEnabled = isEnabled;
-        return isEnabled;
-    }
-
-    bool TimelineReportingEnabled()
-    {
-        return m_IsTimelineEnabled;
-    }
-
-private:
-    IBackendInternal::IBackendProfilingPtr m_BackendProfiling;
-    uint32_t m_CapturePeriod;
-    std::vector<uint16_t> m_ActiveCounters;
-    std::atomic<bool> m_IsTimelineEnabled;
-};
-
-class MockBackendProfilingService
-{
-public:
-    // Getter for the singleton instance
-    static MockBackendProfilingService& Instance()
-    {
-        static MockBackendProfilingService instance;
-        return instance;
-    }
-
-    MockBackendProfilingContext* GetContext()
-    {
-        return m_sharedContext.get();
-    }
-
-    void SetProfilingContextPtr(std::shared_ptr<MockBackendProfilingContext> shared)
-    {
-        m_sharedContext = shared;
-    }
-
-private:
-    std::shared_ptr<MockBackendProfilingContext> m_sharedContext;
-};
-
-class MockBackend : public IBackendInternal
-{
-public:
-    MockBackend()  = default;
-    ~MockBackend() = default;
-
-    static const BackendId& GetIdStatic();
-    const BackendId& GetId() const override
-    {
-        return GetIdStatic();
-    }
-
-    IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override;
-
-    IBackendInternal::IWorkloadFactoryPtr
-        CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr) const override;
-
-    IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override;
-    IBackendInternal::IBackendProfilingContextPtr
-        CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
-                                      IBackendProfilingPtr& backendProfiling) override;
-
-    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
-
-    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
-
-    std::unique_ptr<ICustomAllocator> GetDefaultAllocator() const override;
-};
-
-class MockLayerSupport : public LayerSupportBase
-{
-public:
-    bool IsLayerSupported(const LayerType& type,
-                          const std::vector<TensorInfo>& infos,
-                          const BaseDescriptor& descriptor,
-                          const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
-                          const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
-                          Optional<std::string&> reasonIfUnsupported) const override
-    {
-        switch(type)
-        {
-            case LayerType::Input:
-                return IsInputSupported(infos[0], reasonIfUnsupported);
-            case LayerType::Output:
-                return IsOutputSupported(infos[0], reasonIfUnsupported);
-            case LayerType::Addition:
-                return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
-            case LayerType::Convolution2d:
-            {
-                if (infos.size() != 4)
-                {
-                    throw InvalidArgumentException("Invalid number of TransposeConvolution2d "
-                                                   "TensorInfos. TensorInfos should be of format: "
-                                                   "{input, output, weights, biases}.");
-                }
-
-                auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
-                if (infos[3] == TensorInfo())
-                {
-                    return IsConvolution2dSupported(infos[0],
-                                                    infos[1],
-                                                    desc,
-                                                    infos[2],
-                                                    EmptyOptional(),
-                                                    reasonIfUnsupported);
-                }
-                else
-                {
-                    return IsConvolution2dSupported(infos[0],
-                                                    infos[1],
-                                                    desc,
-                                                    infos[2],
-                                                    infos[3],
-                                                    reasonIfUnsupported);
-                }
-            }
-            default:
-                return false;
-        }
-    }
-
-    bool IsInputSupported(const TensorInfo& /*input*/,
-                          Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
-    {
-        return true;
-    }
-
-    bool IsOutputSupported(const TensorInfo& /*input*/,
-                           Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
-    {
-        return true;
-    }
-
-    bool IsAdditionSupported(const TensorInfo& /*input0*/,
-                             const TensorInfo& /*input1*/,
-                             const TensorInfo& /*output*/,
-                             Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
-    {
-        return true;
-    }
-
-    bool IsConvolution2dSupported(const TensorInfo& /*input*/,
-                                  const TensorInfo& /*output*/,
-                                  const Convolution2dDescriptor& /*descriptor*/,
-                                  const TensorInfo& /*weights*/,
-                                  const Optional<TensorInfo>& /*biases*/,
-                                  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
-    {
-        return true;
-    }
-};
-
-}    // namespace armnn
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index a551dfc..f0f5b63 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -5,7 +5,6 @@
 
 
 #include <CommonTestUtils.hpp>
-#include "MockBackend.hpp"
 
 #include <Graph.hpp>
 #include <Network.hpp>
@@ -14,6 +13,7 @@
 #include <armnn/backends/OptimizationViews.hpp>
 #include <armnn/backends/SubgraphView.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
+#include <armnnTestUtils/MockBackend.hpp>
 
 #include <doctest/doctest.h>
 
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 8036b41..ad59704 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -4,13 +4,13 @@
 //
 
 #include <CommonTestUtils.hpp>
-#include "MockBackend.hpp"
 #include "MockBackendId.hpp"
 
 #include <Graph.hpp>
 #include <Network.hpp>
 
 #include <armnn/BackendRegistry.hpp>
+#include <armnnTestUtils/MockBackend.hpp>
 
 #include <doctest/doctest.h>
 #include <unordered_map>
diff --git a/src/backends/cl/test/DefaultAllocatorTests.cpp b/src/backends/cl/test/DefaultAllocatorTests.cpp
index 6bb11a1..eaa30c8 100644
--- a/src/backends/cl/test/DefaultAllocatorTests.cpp
+++ b/src/backends/cl/test/DefaultAllocatorTests.cpp
@@ -4,6 +4,7 @@
 //
 
 #include <armnn/backends/ICustomAllocator.hpp>
+#include <armnn/BackendRegistry.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/Exceptions.hpp>
 #include <armnn/IRuntime.hpp>
@@ -12,7 +13,7 @@
 #include <cl/ClBackend.hpp>
 #include <doctest/doctest.h>
 #include <backendsCommon/DefaultAllocator.hpp>
-#include <backendsCommon/test/MockBackend.hpp>
+#include <armnnTestUtils/MockBackend.hpp>
 #include <cl/ClBackendDefaultAllocator.hpp>
 
 using namespace armnn;
diff --git a/src/backends/reference/test/RefMemCopyTests.cpp b/src/backends/reference/test/RefMemCopyTests.cpp
index e97d979..5b1f103 100644
--- a/src/backends/reference/test/RefMemCopyTests.cpp
+++ b/src/backends/reference/test/RefMemCopyTests.cpp
@@ -32,7 +32,7 @@
 TEST_SUITE("RefMemCopy")
 {
 
-    TEST_CASE("CopyBetweenCpuMockAndRef")
+    TEST_CASE("CopyBetweenMockAccAndRef")
     {
         LayerTestResult<float, 4> result =
             MemCopyTest<armnn::MockWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
@@ -41,7 +41,7 @@
         CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 
-    TEST_CASE("CopyBetweenRefAndCpuMock")
+    TEST_CASE("CopyBetweenRefAndMockAcc")
     {
         LayerTestResult<float, 4> result =
             MemCopyTest<armnn::RefWorkloadFactory, armnn::MockWorkloadFactory, armnn::DataType::Float32>(false);
@@ -50,7 +50,7 @@
         CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 
-    TEST_CASE("CopyBetweenCpuMockAndRefWithSubtensors")
+    TEST_CASE("CopyBetweenMockAccAndRefWithSubtensors")
     {
         LayerTestResult<float, 4> result =
             MemCopyTest<armnn::MockWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
@@ -59,7 +59,7 @@
         CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 
-    TEST_CASE("CopyBetweenRefAndCpuMockWithSubtensors")
+    TEST_CASE("CopyBetweenRefAndMockAccWithSubtensors")
     {
         LayerTestResult<float, 4> result =
             MemCopyTest<armnn::RefWorkloadFactory, armnn::MockWorkloadFactory, armnn::DataType::Float32>(true);
diff --git a/src/profiling/CounterIdMap.hpp b/src/profiling/CounterIdMap.hpp
index 3895594..cce7184 100644
--- a/src/profiling/CounterIdMap.hpp
+++ b/src/profiling/CounterIdMap.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <map>
+#include <string>
 
 namespace arm
 {
diff --git a/tests/profiling/gatordmock/tests/GatordMockTests.cpp b/tests/profiling/gatordmock/tests/GatordMockTests.cpp
index ff007bb..549bb44 100644
--- a/tests/profiling/gatordmock/tests/GatordMockTests.cpp
+++ b/tests/profiling/gatordmock/tests/GatordMockTests.cpp
@@ -9,7 +9,7 @@
 #include <ProfilingService.hpp>
 #include <TimelinePacketWriterFactory.hpp>
 #include <Runtime.hpp>
-#include <MockBackend.hpp>
+#include <armnnTestUtils/MockBackend.hpp>
 
 #include <common/include/LabelsAndEventClasses.hpp>
 #include <common/include/CommandHandlerRegistry.hpp>