Refactor: Profiler moved to Graph

* The profiler is now created and owned by the Graph and exposed through
  IOptimizedNetwork::GetProfiler(), instead of being created by
  LoadedNetwork. This is to enable later work to instrument the
  Optimizer (see the usage sketch below).

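For reference, a minimal caller-side sketch of the new accessor (illustrative
only, not part of this patch; assumes the public armnn headers, and the
helper function name is hypothetical):

    #include <armnn/INetwork.hpp>
    #include <armnn/IProfiler.hpp>
    #include <memory>

    // With this change the profiler lives on the optimized network's graph,
    // so it can be fetched and enabled before the network is loaded.
    void EnableProfilingOn(armnn::IOptimizedNetwork& optNet)
    {
        const std::shared_ptr<armnn::IProfiler>& profiler = optNet.GetProfiler();
        profiler->EnableProfiling(true);
    }
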
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Change-Id: I2cf1fe022e0d100d6d8705adfbb8cab3ffc96a86
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index ebfc829..30639b1 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -27,6 +27,7 @@
 
 Graph::Graph(const Graph& other)
 :   m_LayersInOrder(other.m_LayersInOrder)
+,   m_Profiler(other.m_Profiler)
 {
     std::unordered_map<const Layer*, Layer*> otherToClonedMap;
 
@@ -636,4 +637,9 @@
     throw LayerValidationException(message.str());
 }
 
+const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
+{
+    return m_Profiler;
+}
+
 } // namespace armnn
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index e2321bb..74aefb2 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -6,6 +6,7 @@
 
 #include "LayersFwd.hpp"
 #include "IGraphObservable.hpp"
+#include "Profiling.hpp"
 
 #include <armnn/Types.hpp>
 #include <armnn/TensorFwd.hpp>
@@ -96,6 +97,7 @@
         : m_LayersInOrder(true)
         , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
                                                         ShapeInferenceMethod::ValidateOnly)
+        , m_Profiler(std::make_shared<IProfiler>())
         {}
 
     Graph(const Graph& other);
@@ -113,6 +115,7 @@
         m_OutputIds     = std::move(other.m_OutputIds);
         m_LayersInOrder = std::move(other.m_LayersInOrder);
         m_Views         = std::move(other.m_Views);
+        m_Profiler      = std::move(other.m_Profiler);
 
         other.ForEachLayer([this](Layer* otherLayer)
         {
@@ -220,6 +223,8 @@
     /// Gets the position of a layer in the graph.
     Iterator GetPosInGraph(Layer& layer);
 
+    const std::shared_ptr<IProfiler>& GetProfiler() const;
+
 private:
     template <typename LayerT>
     class LayerInGraphBase;
@@ -268,6 +273,7 @@
 
     std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
     ShapeInferenceMethod m_ShapeInferenceMethod;
+    std::shared_ptr<IProfiler> m_Profiler;
 
     // Throws exception due to a layer input not being connected to an output slot.
     /// Also verifies weights and bias are set for FullyConnected layers.
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index c161ed3..4688b6e 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -122,13 +122,13 @@
                              m_TensorHandleFactoryRegistry(),
                              m_ProfilingService(profilingService)
 {
-    // Create a profiler and register it for the current thread.
-    m_Profiler = std::make_shared<IProfiler>();
-    ProfilerManager::GetInstance().RegisterProfiler(m_Profiler.get());
+    // Get the profiler and register it for the current thread.
+    const std::shared_ptr<IProfiler>& profiler = m_OptimizedNetwork->GetProfiler();
+    ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
 
-    m_Profiler->EnableProfiling(networkProperties.m_ProfilingEnabled);
+    profiler->EnableProfiling(networkProperties.m_ProfilingEnabled);
 
-    m_Profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
+    profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
 
     Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
     //First create tensor handlers, backends and workload factories.
diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp
index 99dac55..71ceaa3 100644
--- a/src/armnn/LoadedNetwork.hpp
+++ b/src/armnn/LoadedNetwork.hpp
@@ -73,7 +73,7 @@
     // NOTE we return by reference as the purpose of this method is only to provide
     // access to the private m_Profiler and in theory we should not need to increment
     // the shared_ptr's reference counter
-    const std::shared_ptr<IProfiler>& GetProfiler() const { return m_Profiler; }
+    const std::shared_ptr<IProfiler>& GetProfiler() const { return m_OptimizedNetwork->GetProfiler(); }
 
     void FreeWorkingMemory();
 
@@ -126,7 +126,6 @@
     WorkloadFactoryMap  m_WorkloadFactories;
 
     std::unique_ptr<IOptimizedNetwork> m_OptimizedNetwork;
-    std::shared_ptr<IProfiler>         m_Profiler;
 
     WorkloadQueue                      m_InputQueue;
     WorkloadQueue                      m_WorkloadQueue;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4298b05..99d7b96 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -516,6 +516,11 @@
     return pOptimizedNetworkImpl->SerializeToDot(stream);
 }
 
+const std::shared_ptr<IProfiler>& IOptimizedNetwork::GetProfiler() const
+{
+    return pOptimizedNetworkImpl->GetGraph().GetProfiler();
+}
+
 profiling::ProfilingGuid IOptimizedNetwork::GetGuid() const
 {
     return pOptimizedNetworkImpl->GetGuid();
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index a54b712..ca28199 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -162,6 +162,10 @@
                                 std::string& errorMessage,
                                 const INetworkProperties& networkProperties)
 {
+    // Register the profiler
+    auto profiler = inNetwork->GetProfiler();
+    ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
+
     IOptimizedNetwork* rawNetwork = inNetwork.release();
 
     networkIdOut = GenerateNetworkId();
@@ -250,6 +254,9 @@
         context.second->AfterUnloadNetwork(networkId);
     }
 
+    // Unregister the profiler
+    ProfilerManager::GetInstance().RegisterProfiler(nullptr);
+
     ARMNN_LOG(debug) << "RuntimeImpl::UnloadNetwork(): Unloaded network with ID: " << networkId;
     return Status::Success;
 }