IVGCVSW-5799 'Create Pimpl Idiom for Async prototype'

* Implemented Pimpl Idiom for IAsyncNetwork

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ic7311880563568b014a27f6347f8d41f2ad96df6
diff --git a/include/armnn/IAsyncNetwork.hpp b/include/armnn/IAsyncNetwork.hpp
index 7ef83bb..c234ae5 100644
--- a/include/armnn/IAsyncNetwork.hpp
+++ b/include/armnn/IAsyncNetwork.hpp
@@ -17,33 +17,46 @@
 
 namespace armnn
 {
+struct INetworkProperties;
+
+namespace profiling
+{
+class ProfilingService;
+}
 
 namespace experimental
 {
+class AsyncNetworkImpl;
 
 class IAsyncNetwork
 {
 public:
-    virtual ~IAsyncNetwork() {};
+    IAsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
+                  const INetworkProperties& networkProperties,
+                  profiling::ProfilingService& profilingService);
+    ~IAsyncNetwork();
 
-    virtual TensorInfo GetInputTensorInfo(LayerBindingId layerId) const = 0;
-    virtual TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const = 0;
+    TensorInfo GetInputTensorInfo(LayerBindingId layerId) const;
+    TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const;
 
     /// Thread safe execution of the network. Returns once execution is complete.
     /// Will block until this and any other thread using the same workingMem object completes.
-    virtual Status Execute(const InputTensors& inputTensors,
-                           const OutputTensors& outputTensors,
-                           IWorkingMemHandle& workingMemHandle) = 0;
+    Status Execute(const InputTensors& inputTensors,
+                   const OutputTensors& outputTensors,
+                   IWorkingMemHandle& workingMemHandle);
 
     /// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
     /// overlapped Execution by calling this function from different threads.
-    virtual std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle() = 0;
+    std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle();
 
     /// Get the profiler used for this network
-    virtual std::shared_ptr<IProfiler> GetProfiler() const = 0;
+    std::shared_ptr<IProfiler> GetProfiler() const;
 
     /// Register a debug callback function to be used with this network
-    virtual void RegisterDebugCallback(const DebugCallbackFunction& func) = 0;
+    void RegisterDebugCallback(const DebugCallbackFunction& func);
+
+private:
+    std::unique_ptr<AsyncNetworkImpl> pAsyncNetworkImpl;
 };
 
 } // end experimental namespace
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 2db6d5d..5d2b4ed 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -706,7 +706,7 @@
 
 namespace experimental
 {
-class AsyncNetwork;
+class AsyncNetworkImpl;
 class WorkingMemHandle;
 }
 
@@ -730,7 +730,7 @@
 protected:
     friend class LoadedNetwork;
 
-    friend class experimental::AsyncNetwork;
+    friend class experimental::AsyncNetworkImpl;
     friend class experimental::WorkingMemHandle;
 
     friend Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
diff --git a/src/armnn/AsyncNetwork.cpp b/src/armnn/AsyncNetwork.cpp
index 4698bcf..4e3838b 100644
--- a/src/armnn/AsyncNetwork.cpp
+++ b/src/armnn/AsyncNetwork.cpp
@@ -26,6 +26,45 @@
 namespace experimental
 {
 
+IAsyncNetwork::IAsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
+                             const INetworkProperties& networkProperties,
+                             profiling::ProfilingService& profilingService)
+    : pAsyncNetworkImpl(std::make_unique<AsyncNetworkImpl>(std::move(net), networkProperties, profilingService)) {}
+
+IAsyncNetwork::~IAsyncNetwork() = default;
+
+TensorInfo IAsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
+{
+    return pAsyncNetworkImpl->GetInputTensorInfo(layerId);
+}
+
+TensorInfo IAsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
+{
+    return pAsyncNetworkImpl->GetOutputTensorInfo(layerId);
+}
+
+Status IAsyncNetwork::Execute(const InputTensors& inputTensors,
+                              const OutputTensors& outputTensors,
+                              IWorkingMemHandle& workingMemHandle)
+{
+    return pAsyncNetworkImpl->Execute(inputTensors, outputTensors, workingMemHandle);
+}
+
+std::unique_ptr<IWorkingMemHandle> IAsyncNetwork::CreateWorkingMemHandle()
+{
+    return pAsyncNetworkImpl->CreateWorkingMemHandle();
+}
+
+std::shared_ptr<IProfiler> IAsyncNetwork::GetProfiler() const
+{
+    return pAsyncNetworkImpl->GetProfiler();
+}
+
+void IAsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
+{
+    pAsyncNetworkImpl->RegisterDebugCallback(func);
+}
+
 void AddLayerStructure(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
                        const Layer& layer,
                        profiling::ProfilingGuid networkGuid)
@@ -63,7 +102,7 @@
                                       profiling::LabelsAndEventClasses::CHILD_GUID);
 }
 
-TensorInfo AsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
+TensorInfo AsyncNetworkImpl::GetInputTensorInfo(LayerBindingId layerId) const
 {
     for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
     {
@@ -77,7 +116,7 @@
-    throw InvalidArgumentException(fmt::format("No input layer is associated with id {0}}", layerId));
+    throw InvalidArgumentException(fmt::format("No input layer is associated with id {0}", layerId));
 }
 
-TensorInfo AsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
+TensorInfo AsyncNetworkImpl::GetOutputTensorInfo(LayerBindingId layerId) const
 {
     for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
     {
@@ -93,7 +132,7 @@
 }
 
 // Need something like the collectors to get the correct tensors for the inputs
-void AsyncNetwork::CollectInputTensorHandles(
+void AsyncNetworkImpl::CollectInputTensorHandles(
         std::unordered_map<LayerGuid, std::vector<ITensorHandle*> >& tensorHandles,
         std::vector<ITensorHandle*>& inputs,
         const armnn::Layer* layer,
@@ -128,7 +167,7 @@
     }
 }
 
-void AsyncNetwork::CreateOutputTensorHandles(
+void AsyncNetworkImpl::CreateOutputTensorHandles(
         std::unordered_map<LayerGuid, std::vector<ITensorHandle*> >& tensorHandles,
         std::vector<ITensorHandle*>& outputs,
         const armnn::Layer* layer,
@@ -156,7 +195,7 @@
     tensorHandles.insert({guid, tensorHandleVectors});
 }
 
-const IWorkloadFactory& AsyncNetwork::GetWorkloadFactory(const Layer& layer) const
+const IWorkloadFactory& AsyncNetworkImpl::GetWorkloadFactory(const Layer& layer) const
 {
     const IWorkloadFactory* workloadFactory = nullptr;
 
@@ -181,7 +220,9 @@
     return *workloadFactory;
 }
 
-void AsyncNetwork::EnqueueInput(const BindableLayer& layer, const ConstTensor& inputTensor, WorkingMemHandle& context)
+void AsyncNetworkImpl::EnqueueInput(const BindableLayer& layer,
+                                    const ConstTensor& inputTensor,
+                                    WorkingMemHandle& context)
 {
     if (layer.GetType() != LayerType::Input)
     {
@@ -232,7 +273,7 @@
     }
 }
 
-void AsyncNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outputTensor, WorkingMemHandle& handle)
+void AsyncNetworkImpl::EnqueueOutput(const BindableLayer& layer, const Tensor& outputTensor, WorkingMemHandle& handle)
 {
     if (layer.GetType() != LayerType::Output)
     {
@@ -304,7 +345,7 @@
     }
 }
 
-AsyncNetwork::AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
+AsyncNetworkImpl::AsyncNetworkImpl(std::unique_ptr<IOptimizedNetwork> net,
                            const INetworkProperties& networkProperties,
                            profiling::ProfilingService& profilingService) :
     m_OptimizedNetwork(std::move(net)),
@@ -421,7 +462,7 @@
     }
 }
 
-Status AsyncNetwork::Execute(const InputTensors& inputTensors,
+Status AsyncNetworkImpl::Execute(const InputTensors& inputTensors,
                              const OutputTensors& outputTensors,
                              IWorkingMemHandle& iWorkingMemHandle)
 {
@@ -529,12 +570,12 @@
 }
 
 /// Get the profiler used for this network
-std::shared_ptr<IProfiler> AsyncNetwork::GetProfiler() const
+std::shared_ptr<IProfiler> AsyncNetworkImpl::GetProfiler() const
 {
     return m_Profiler;
 }
 
-void AsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
+void AsyncNetworkImpl::RegisterDebugCallback(const DebugCallbackFunction& func)
 {
     for (auto&& workloadPtr: m_WorkloadQueue)
     {
@@ -544,7 +585,7 @@
 
 /// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
 /// overlapped Execution by calling this function from different threads.
-std::unique_ptr<IWorkingMemHandle> AsyncNetwork::CreateWorkingMemHandle()
+std::unique_ptr<IWorkingMemHandle> AsyncNetworkImpl::CreateWorkingMemHandle()
 {
     Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
     std::unordered_map<LayerGuid, std::vector<ITensorHandle*> > tensorHandles;
@@ -592,7 +633,7 @@
     return std::make_unique<WorkingMemHandle>(workingMemDescriptors, workingMemDescriptorMap);
 }
 
-void AsyncNetwork::FreeWorkingMemory()
+void AsyncNetworkImpl::FreeWorkingMemory()
 {
     // Informs the memory managers to release memory in it's respective memory group
     for (auto&& workloadFactory : m_WorkloadFactories)
diff --git a/src/armnn/AsyncNetwork.hpp b/src/armnn/AsyncNetwork.hpp
index 9c525c5..9bdc7ee 100644
--- a/src/armnn/AsyncNetwork.hpp
+++ b/src/armnn/AsyncNetwork.hpp
@@ -29,35 +29,35 @@
 namespace experimental
 {
 
-class AsyncNetwork final : public IAsyncNetwork
+class AsyncNetworkImpl final
 {
 public:
     using WorkloadQueue = std::vector<std::unique_ptr<IWorkload>>;
 
-    AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
-                 const INetworkProperties &networkProperties,
-                 profiling::ProfilingService &profilingService);
+    AsyncNetworkImpl(std::unique_ptr<IOptimizedNetwork> net,
+                     const INetworkProperties &networkProperties,
+                     profiling::ProfilingService &profilingService);
 
-    ~AsyncNetwork() { FreeWorkingMemory(); }
+    ~AsyncNetworkImpl() { FreeWorkingMemory(); }
 
-    TensorInfo GetInputTensorInfo(LayerBindingId layerId) const override;
-    TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const override;
+    TensorInfo GetInputTensorInfo(LayerBindingId layerId) const;
+    TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const;
 
     /// Thread safe execution of the network. Returns once execution is complete.
     /// Will block until this and any other thread using the same workingMem object completes.
-    virtual Status Execute(const InputTensors& inputTensors,
-                           const OutputTensors& outputTensors,
-                           IWorkingMemHandle& workingMemHandle) override;
+    Status Execute(const InputTensors& inputTensors,
+                   const OutputTensors& outputTensors,
+                   IWorkingMemHandle& workingMemHandle);
 
     /// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
     /// overlapped Execution by calling this function from different threads.
-    std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle() override;
+    std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle();
 
     /// Get the profiler used for this network
-    std::shared_ptr<IProfiler> GetProfiler() const override;
+    std::shared_ptr<IProfiler> GetProfiler() const;
 
     /// Register a debug callback function to be used with this network
-    void RegisterDebugCallback(const DebugCallbackFunction& func) override;
+    void RegisterDebugCallback(const DebugCallbackFunction& func);
 
 private:
     void FreeWorkingMemory();
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index 5dc1ef9..57aaabd 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -187,7 +187,7 @@
         context.second->BeforeLoadNetwork(networkIdOut);
     }
 
-    unique_ptr<AsyncNetwork> asyncNetwork = std::make_unique<AsyncNetwork>(
+    unique_ptr<IAsyncNetwork> asyncNetwork = std::make_unique<IAsyncNetwork>(
             std::unique_ptr<IOptimizedNetwork>(rawNetwork),
             networkProperties,
             m_ProfilingService);