IVGCVSW-2067 : dynamically create workload factories based on the backends in the network

Change-Id: Ide594db8c79ff67642721d8bad47624b88621fbd
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 7aa66d9..4013777 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -12,6 +12,7 @@
 #include "HeapProfiling.hpp"
 
 #include <backends/CpuTensorHandle.hpp>
+#include <backends/BackendRegistry.hpp>
 
 #include <boost/polymorphic_cast.hpp>
 #include <boost/assert.hpp>
@@ -70,8 +71,7 @@
 }
 
 LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
-    : m_CpuRef()
-    , m_OptimizedNetwork(std::move(net))
+    : m_OptimizedNetwork(std::move(net))
     , m_WorkingMemLock(m_WorkingMemMutex, std::defer_lock)
 {
     // Create a profiler and register it for the current thread.
@@ -79,12 +79,20 @@
     ProfilerManager::GetInstance().RegisterProfiler(m_Profiler.get());
 
     Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
-    //First create tensor handlers.
+    //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
     //Because workload creation can modify some of the handlers,
     //(for example the splitter and merger layers).
     for (auto&& layer : order)
     {
+        auto const& backend = layer->GetBackendId();
+        if (m_Backends.count(backend) == 0)
+        {
+            auto createBackend = BackendRegistryInstance().GetFactory(backend);
+            auto it = m_Backends.emplace(std::make_pair(backend, createBackend()));
+            m_WorkloadFactories.emplace(std::make_pair(backend,
+                                                       it.first->second->CreateWorkloadFactory()));
+        }
         layer->CreateTensorHandles(m_OptimizedNetwork->GetGraph(), GetWorkloadFactory(*layer));
     }
 
@@ -126,9 +134,10 @@
     m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers();
 
     // Finalize the workload factories before execution.
-    m_CpuRef.Finalize();
-    m_CpuAcc.Finalize();
-    m_GpuAcc.Finalize();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Finalize();
+    }
 }
 
 TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
@@ -164,26 +173,25 @@
 {
     const IWorkloadFactory* workloadFactory = nullptr;
 
-    if (layer.GetBackendId() == Compute::CpuAcc)
+    auto it = m_WorkloadFactories.find(layer.GetBackendId());
+    if (it == m_WorkloadFactories.end())
     {
-        workloadFactory = &m_CpuAcc;
+        throw RuntimeException(
+            boost::str(
+                boost::format("No workload factory for %1% to be used for layer: %2%")
+                % layer.GetBackendId().Get()
+                % layer.GetNameStr()),
+            CHECK_LOCATION());
     }
-    else if (layer.GetBackendId() == Compute::GpuAcc)
-    {
-        workloadFactory = &m_GpuAcc;
-    }
-    else if (layer.GetBackendId() == Compute::CpuRef)
-    {
-        workloadFactory = &m_CpuRef;
-    }
+
+    workloadFactory = it->second.get();
 
     BOOST_ASSERT_MSG(workloadFactory, "No workload factory");
 
     std::string reasonIfUnsupported;
     BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
-                     "Factory does not support layer");
+        "Factory does not support layer");
     boost::ignore_unused(reasonIfUnsupported);
-
     return *workloadFactory;
 }
 
@@ -408,9 +416,10 @@
     {
         return;
     }
-    m_CpuRef.Acquire();
-    m_CpuAcc.Acquire();
-    m_GpuAcc.Acquire();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Acquire();
+    }
     m_IsWorkingMemAllocated = true;
 }
 
@@ -422,9 +431,10 @@
         return;
     }
     // Informs the memory managers to release memory in it's respective memory group
-    m_CpuRef.Release();
-    m_CpuAcc.Release();
-    m_GpuAcc.Release();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Release();
+    }
     m_IsWorkingMemAllocated = false;
 }
 
diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp
index 3deb8bc..51eb04f 100644
--- a/src/armnn/LoadedNetwork.hpp
+++ b/src/armnn/LoadedNetwork.hpp
@@ -11,13 +11,12 @@
 #include "LayerFwd.hpp"
 #include "Profiling.hpp"
 
-#include <backends/reference/RefWorkloadFactory.hpp>
-#include <backends/neon/NeonWorkloadFactory.hpp>
-#include <backends/cl/ClWorkloadFactory.hpp>
+#include <backends/IBackendInternal.hpp>
 #include <backends/Workload.hpp>
 #include <backends/WorkloadFactory.hpp>
 
 #include <mutex>
+#include <unordered_map>
 
 namespace cl
 {
@@ -62,9 +61,11 @@
 
     const IWorkloadFactory& GetWorkloadFactory(const Layer& layer) const;
 
-    RefWorkloadFactory  m_CpuRef;
-    NeonWorkloadFactory m_CpuAcc;
-    ClWorkloadFactory   m_GpuAcc;
+    using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
+    using WorkloadFactoryMap = std::unordered_map<BackendId, IBackendInternal::IWorkloadFactoryPtr>;
+
+    BackendPtrMap       m_Backends;
+    WorkloadFactoryMap  m_WorkloadFactories;
 
     std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
     WorkloadQueue m_InputQueue;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f95e829..cab5106 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -211,7 +211,9 @@
 
                             // Try preferred backend first
                             layer->SetBackendId(preferredBackend);
-                            if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
+                            if (IWorkloadFactory::IsLayerSupported(*layer,
+                                                                   EmptyOptional(),
+                                                                   reasonIfUnsupported))
                             {
                                 supportedBackendFound = true;
                             }
@@ -226,7 +228,9 @@
                                     }
 
                                     layer->SetBackendId(backend);
-                                    if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
+                                    if (IWorkloadFactory::IsLayerSupported(*layer,
+                                                                           EmptyOptional(),
+                                                                           reasonIfUnsupported))
                                     {
                                         supportedBackendFound = true;
                                         break;
diff --git a/src/armnnUtils/GraphTopologicalSort.hpp b/src/armnnUtils/GraphTopologicalSort.hpp
index 81a37ac..1131459 100644
--- a/src/armnnUtils/GraphTopologicalSort.hpp
+++ b/src/armnnUtils/GraphTopologicalSort.hpp
@@ -4,8 +4,8 @@
 //
 #pragma once
 
+#include <armnn/Optional.hpp>
 #include <boost/assert.hpp>
-#include <boost/optional.hpp>
 
 #include <functional>
 #include <map>
@@ -27,7 +27,7 @@
 
 
 template <typename TNodeId>
-boost::optional<TNodeId> GetNextChild(TNodeId node,
+armnn::Optional<TNodeId> GetNextChild(TNodeId node,
                                       std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
                                       std::map<TNodeId, NodeState>& nodeStates)
 {
@@ -70,11 +70,11 @@
 
         nodeStates[current] = NodeState::Visiting;
 
-        boost::optional<TNodeId> nextChildOfCurrent = GetNextChild(current, getIncomingEdges, nodeStates);
+        auto nextChildOfCurrent = GetNextChild(current, getIncomingEdges, nodeStates);
 
         if (nextChildOfCurrent)
         {
-            TNodeId nextChild = nextChildOfCurrent.get();
+            TNodeId nextChild = nextChildOfCurrent.value();
 
             // If the child has not been searched, add to the stack and iterate over this node
             if (nodeStates.find(nextChild) == nodeStates.end())
diff --git a/src/backends/BackendRegistry.hpp b/src/backends/BackendRegistry.hpp
index 23cb37d..4465e95 100644
--- a/src/backends/BackendRegistry.hpp
+++ b/src/backends/BackendRegistry.hpp
@@ -6,11 +6,12 @@
 
 #include <armnn/Types.hpp>
 #include "RegistryCommon.hpp"
+#include "IBackendInternal.hpp"
 
 namespace armnn
 {
 
-using BackendRegistry = RegistryCommon<IBackend, IBackendUniquePtr>;
+using BackendRegistry = RegistryCommon<IBackendInternal, IBackendInternalUniquePtr>;
 
 BackendRegistry& BackendRegistryInstance();
 
diff --git a/src/backends/IBackendInternal.hpp b/src/backends/IBackendInternal.hpp
index a441abd..7e44dbd 100644
--- a/src/backends/IBackendInternal.hpp
+++ b/src/backends/IBackendInternal.hpp
@@ -5,19 +5,27 @@
 #pragma once
 
 #include <armnn/Types.hpp>
-#include <backends/WorkloadFactory.hpp>
 
 namespace armnn
 {
+class IWorkloadFactory;
 
 class IBackendInternal : public IBackend
 {
 protected:
+    // Creation must be done through a specific
+    // backend interface.
     IBackendInternal() = default;
-    ~IBackendInternal() override = default;
 
 public:
-    virtual std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const = 0;
+    // Allow backends created by the factory function
+    // to be destroyed through IBackendInternal.
+    ~IBackendInternal() override = default;
+
+    using IWorkloadFactoryPtr = std::unique_ptr<IWorkloadFactory>;
+    virtual IWorkloadFactoryPtr CreateWorkloadFactory() const = 0;
 };
 
+using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
+
 } // namespace armnn
diff --git a/src/backends/WorkloadFactory.cpp b/src/backends/WorkloadFactory.cpp
index e7dec49..fea383f 100644
--- a/src/backends/WorkloadFactory.cpp
+++ b/src/backends/WorkloadFactory.cpp
@@ -5,10 +5,6 @@
 #include <backends/WorkloadFactory.hpp>
 #include <backends/LayerSupportRegistry.hpp>
 
-#include <backends/reference/RefWorkloadFactory.hpp>
-#include <backends/neon/NeonWorkloadFactory.hpp>
-#include <backends/cl/ClWorkloadFactory.hpp>
-
 #include <armnn/Types.hpp>
 #include <armnn/LayerSupport.hpp>
 #include <Layer.hpp>
@@ -24,40 +20,42 @@
 
 namespace
 {
-    const TensorInfo OverrideDataType(const TensorInfo& info, boost::optional<DataType> type)
-    {
-        if (type == boost::none)
-        {
-            return info;
-        }
 
-        return TensorInfo(info.GetShape(), type.get(), info.GetQuantizationScale(), info.GetQuantizationOffset());
+const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
+{
+    if (!type)
+    {
+        return info;
     }
 
-    boost::optional<DataType> GetBiasTypeFromWeightsType(boost::optional<DataType> weightsType)
-    {
-        if (weightsType == boost::none)
-        {
-            return weightsType;
-        }
-
-        switch(weightsType.get())
-        {
-            case DataType::Float16:
-            case DataType::Float32:
-                return weightsType;
-            case DataType::QuantisedAsymm8:
-                return DataType::Signed32;
-            default:
-                BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
-        }
-        return boost::none;
-    }
+    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
 }
 
+Optional<DataType> GetBiasTypeFromWeightsType(Optional<DataType> weightsType)
+{
+    if (!weightsType)
+    {
+        return weightsType;
+    }
+
+    switch(weightsType.value())
+    {
+        case DataType::Float16:
+        case DataType::Float32:
+            return weightsType;
+        case DataType::QuantisedAsymm8:
+            return DataType::Signed32;
+        default:
+            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+    }
+    return EmptyOptional();
+}
+
+} // anonymous namespace
+
 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                         const IConnectableLayer& connectableLayer,
-                                        boost::optional<DataType> dataType,
+                                        Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
     Optional<std::string&> reason = outReasonIfUnsupported;
@@ -589,7 +587,7 @@
 }
 
 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
-                                        boost::optional<DataType> dataType,
+                                        Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
     auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
diff --git a/src/backends/WorkloadFactory.hpp b/src/backends/WorkloadFactory.hpp
index 41d6741..2d482e0 100644
--- a/src/backends/WorkloadFactory.hpp
+++ b/src/backends/WorkloadFactory.hpp
@@ -6,9 +6,9 @@
 
 #include <memory>
 #include <armnn/TensorFwd.hpp>
+#include <armnn/Optional.hpp>
 #include <backends/OutputHandler.hpp>
 #include <backends/Workload.hpp>
-#include <boost/optional.hpp>
 
 namespace armnn
 {
@@ -34,11 +34,11 @@
 
     static bool IsLayerSupported(const BackendId& backendId,
                                  const IConnectableLayer& layer,
-                                 boost::optional<DataType> dataType,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     static bool IsLayerSupported(const IConnectableLayer& layer,
-                                 boost::optional<DataType> dataType,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const = 0;
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 1bab96b..d6a3a89 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -23,7 +23,7 @@
     ClBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new ClBackend, &ClBackend::Destroy);
+        return IBackendInternalUniquePtr(new ClBackend);
     }
 };
 
@@ -35,14 +35,9 @@
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> ClBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<ClWorkloadFactory>();
 }
 
-void ClBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<ClBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index 49a7a46..4eae6c9 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -18,9 +18,7 @@
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index e1d8314..c697d90 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -35,7 +35,7 @@
 {
 
 bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
-                                         boost::optional<DataType> dataType,
+                                         Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::GpuAcc, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 66de3a5..1441b71 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -5,12 +5,11 @@
 #pragma once
 
 #include <armnn/IRuntime.hpp>
+#include <armnn/Optional.hpp>
 
 #include <backends/OutputHandler.hpp>
 #include <backends/aclCommon/memory/BaseMemoryManager.hpp>
 
-#include <boost/optional.hpp>
-
 namespace armnn
 {
 
@@ -22,7 +21,8 @@
 
     virtual Compute GetCompute() const override { return Compute::GpuAcc; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return true; }
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index b710295..e475f02 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -23,7 +23,7 @@
     NeonBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new NeonBackend, &NeonBackend::Destroy);
+        return IBackendInternalUniquePtr(new NeonBackend);
     }
 };
 
@@ -35,14 +35,9 @@
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> NeonBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<NeonWorkloadFactory>();
 }
 
-void NeonBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<NeonBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index 6280610..e1287c7 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -18,9 +18,7 @@
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 0e069a2..f0a9e76 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -25,7 +25,8 @@
 namespace armnn
 {
 
-bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
+                                           Optional<DataType> dataType,
                                            std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::CpuAcc, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 6495161..d1dd2c8 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -4,11 +4,12 @@
 //
 #pragma once
 
+#include <armnn/Optional.hpp>
 #include <backends/OutputHandler.hpp>
 #include <backends/aclCommon/memory/BaseMemoryManager.hpp>
 
 #include <boost/core/ignore_unused.hpp>
-#include <boost/optional.hpp>
+
 
 namespace armnn
 {
@@ -21,7 +22,8 @@
 
     virtual Compute GetCompute() const override { return Compute::CpuAcc; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return true; }
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index 9afb42d..34348fa 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -23,7 +23,7 @@
     RefBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new RefBackend, &RefBackend::Destroy);
+        return IBackendInternalUniquePtr(new RefBackend);
     }
 };
 
@@ -35,14 +35,9 @@
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> RefBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<RefWorkloadFactory>();
 }
 
-void RefBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<RefBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 0cd3cf4..7162c9b 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -18,9 +18,7 @@
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 048f6cd..783e5fb 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -25,7 +25,8 @@
 {
 }
 
-bool RefWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
+                                          Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::CpuRef, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 1a9227a..ef2e1ab 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -4,11 +4,12 @@
 //
 #pragma once
 
+#include <armnn/Optional.hpp>
 #include <backends/WorkloadFactory.hpp>
 #include <backends/OutputHandler.hpp>
 
 #include <boost/core/ignore_unused.hpp>
-#include <boost/optional.hpp>
+
 
 namespace armnn
 {
@@ -34,7 +35,8 @@
 
     virtual Compute GetCompute() const override { return Compute::CpuRef; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return false; }
diff --git a/src/backends/test/BackendRegistryTests.cpp b/src/backends/test/BackendRegistryTests.cpp
index f6f7499..34a2706 100644
--- a/src/backends/test/BackendRegistryTests.cpp
+++ b/src/backends/test/BackendRegistryTests.cpp
@@ -55,7 +55,7 @@
         [&called]()
         {
             called = true;
-            return armnn::IBackendUniquePtr(nullptr, nullptr);
+            return armnn::IBackendInternalUniquePtr(nullptr);
         }
     );
 
@@ -82,7 +82,7 @@
         [&called]()
         {
             called = true;
-            return armnn::IBackendUniquePtr(nullptr, nullptr);
+            return armnn::IBackendInternalUniquePtr(nullptr);
         }
     );