IVGCVSW-5980 Add Descriptor, TensorInfo and Convolution algorithm to JSON

 * Add GUID as field to layer details and profiling events
 * Add Optional GUID param to existing tests
 * Improve Details macro to be an inline function
 * Fix some formatting

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I66f192a90a7642b3ee8e7dda0d3f428cce002581
diff --git a/src/armnn/JsonPrinter.cpp b/src/armnn/JsonPrinter.cpp
index 986edb9..b5612eb 100644
--- a/src/armnn/JsonPrinter.cpp
+++ b/src/armnn/JsonPrinter.cpp
@@ -24,6 +24,10 @@
     if (object.GetType() != JsonObjectType::ExecObjectDesc)
     {
         PrintLabel(object.m_Label, id);
+        if (object.m_Guid.has_value())
+        {
+            PrintGuid(object.m_Guid.value());
+        }
         PrintType(object.m_Type);
     }
 
@@ -120,6 +124,11 @@
     m_OutputStream << R"(")";
 }
 
+void JsonPrinter::PrintGuid(armnn::profiling::ProfilingGuid guid)
+{
+    PrintTabs();
+    m_OutputStream << std::quoted("GUID") << ": " << std::quoted(std::to_string(guid)) << std::endl;
+}
 
 void JsonPrinter::PrintMeasurementsList(const std::vector<double>& measurementsVector)
 {
diff --git a/src/armnn/JsonPrinter.hpp b/src/armnn/JsonPrinter.hpp
index 4af1609..9e8ecb4 100644
--- a/src/armnn/JsonPrinter.hpp
+++ b/src/armnn/JsonPrinter.hpp
@@ -10,6 +10,7 @@
 #include <set>
 #include <sstream>
 
+#include <ProfilingGuid.hpp>
 #include "Instrument.hpp"
 #include "JsonUtils.hpp"
 
@@ -27,7 +28,10 @@
 {
     // Object type changes according to the JsonObjectType specified in enum
     JsonChildObject(const std::string& label)
-        : m_Label(label), m_Unit(Measurement::Unit::TIME_MS), m_Type(JsonObjectType::Event)
+        : m_Label(label),
+          m_Unit(Measurement::Unit::TIME_MS),
+          m_Type(JsonObjectType::Event),
+          m_Guid(armnn::EmptyOptional())
     {}
     JsonChildObject(const JsonChildObject&) = default;
 
@@ -46,6 +50,11 @@
         }
     }
 
+    void SetGuid(profiling::ProfilingGuid guid)
+    {
+        m_Guid = Optional<profiling::ProfilingGuid>(guid);
+    }
+
     void AddChild(const JsonChildObject& childObject)
     {
         m_Children.push_back(childObject);
@@ -81,6 +90,7 @@
     std::string m_Label;
     Measurement::Unit m_Unit;
     JsonObjectType m_Type;
+    Optional<profiling::ProfilingGuid> m_Guid;
     std::vector<double> m_Measurements;
     std::vector<std::string> m_LayerDetailsList;
     std::vector<JsonChildObject> m_Children;
@@ -96,6 +106,7 @@
     void PrintLabel(const std::string& label, size_t id);
     void PrintUnit(armnn::Measurement::Unit unit);
     void PrintType(armnn::JsonObjectType type);
+    void PrintGuid(armnn::profiling::ProfilingGuid guid);
     void PrintMeasurementsList(const std::vector<double>& measurementsVector);
 
 public:
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index 171d22b..509e9de 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -6,7 +6,6 @@
 
 #include <armnn/BackendId.hpp>
 #include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 
 #include "JsonPrinter.hpp"
 
@@ -156,8 +155,8 @@
         const double avgMs = eventStats.m_TotalMs / double(eventStats.m_Count);
 
         outStream << "\t" << std::setw(50) << eventLabel << " " << std::setw(9) << avgMs << " "
-            << std::setw(9) << eventStats.m_MinMs << " " << std::setw(9) << eventStats.m_MaxMs << " "
-            << std::setw(9) << eventStats.m_TotalMs << " " << std::setw(9) << eventStats.m_Count << std::endl;
+                  << std::setw(9) << eventStats.m_MinMs << " " << std::setw(9) << eventStats.m_MaxMs << " "
+                  << std::setw(9) << eventStats.m_TotalMs << " " << std::setw(9) << eventStats.m_Count << std::endl;
     }
     outStream << std::endl;
 }
@@ -198,12 +197,18 @@
 }
 
 Event* ProfilerImpl::BeginEvent(armnn::IProfiler* profiler,
-                            const BackendId& backendId,
-                            const std::string& label,
-                            std::vector<InstrumentPtr>&& instruments)
+                                const BackendId& backendId,
+                                const std::string& label,
+                                std::vector<InstrumentPtr>&& instruments,
+                                const Optional<profiling::ProfilingGuid>& guid)
 {
     Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
-    m_EventSequence.push_back(std::make_unique<Event>(label, profiler, parent, backendId, std::move(instruments)));
+    m_EventSequence.push_back(std::make_unique<Event>(label,
+                                                      profiler,
+                                                      parent,
+                                                      backendId,
+                                                      std::move(instruments),
+                                                      guid));
     Event* event = m_EventSequence.back().get();
     event->Start();
 
@@ -234,7 +239,7 @@
 
 int CalcLevel(const Event* eventPtr)
 {
-    int level=0;
+    int level = 0;
     while (eventPtr != nullptr)
     {
         eventPtr = eventPtr->GetParentEvent();
@@ -272,7 +277,7 @@
         auto it = outDescendantsMap.find(parent);
         if (it == outDescendantsMap.end())
         {
-            outDescendantsMap.emplace(parent, std::vector<const Event*>({eventPtrRaw}));
+            outDescendantsMap.emplace(parent, std::vector<const Event*>({ eventPtrRaw }));
         }
         else
         {
@@ -295,14 +300,22 @@
                         std::map<const Event*, std::vector<const Event*>> descendantsMap)
 {
     ARMNN_ASSERT(parentEvent);
+
+    // If profiling GUID is entered, process it
+    if (parentEvent->GetProfilingGuid().has_value())
+    {
+        profiling::ProfilingGuid profilingGuid;
+        profilingGuid = parentEvent->GetProfilingGuid().value();
+        parentObject.SetGuid(profilingGuid);
+    }
     std::vector<Measurement> instrumentMeasurements = parentEvent->GetMeasurements();
-    unsigned int childIdx=0;
-    for(size_t measurementIndex = 0; measurementIndex < instrumentMeasurements.size(); ++measurementIndex, ++childIdx)
+    unsigned int childIdx = 0;
+    for (size_t measurementIndex = 0; measurementIndex < instrumentMeasurements.size(); ++measurementIndex, ++childIdx)
     {
         if (inferenceIndex == 0)
         {
             // Only add kernel measurement once, in case of multiple inferences
-            JsonChildObject measurementObject{instrumentMeasurements[measurementIndex].m_Name};
+            JsonChildObject measurementObject{ instrumentMeasurements[measurementIndex].m_Name };
             measurementObject.SetUnit(instrumentMeasurements[measurementIndex].m_Unit);
             measurementObject.SetType(JsonObjectType::Measurement);
 
@@ -313,7 +326,6 @@
         parentObject.GetChild(childIdx).AddMeasurement(instrumentMeasurements[measurementIndex].m_Value);
     }
 
-
     auto childEventsIt = descendantsMap.find(parentEvent);
     if (childEventsIt != descendantsMap.end())
     {
@@ -322,7 +334,7 @@
             if (inferenceIndex == 0)
             {
                 // Only add second level once, in case of multiple inferences
-                JsonChildObject childObject{childEvent->GetName()};
+                JsonChildObject childObject{ childEvent->GetName() };
                 childObject.SetType(JsonObjectType::Event);
                 parentObject.AddChild(childObject);
             }
@@ -353,7 +365,7 @@
     std::map<const Event*, std::vector<const Event*>> descendantsMap;
     PopulateDescendants(descendantsMap);
 
-    JsonChildObject inferenceObject{"inference_measurements"};
+    JsonChildObject inferenceObject{ "inference_measurements" };
     std::vector<JsonChildObject> workloadObjects;
     std::map<unsigned int, std::vector<JsonChildObject>> workloadToKernelObjects;
 
@@ -368,15 +380,15 @@
 
     if (m_ProfilingDetails.get()->DetailsExist())
     {
-        JsonChildObject detailsObject{"layer_details"};
+        JsonChildObject detailsObject{ "layer_details" };
         ConfigureDetailsObject(detailsObject, m_ProfilingDetails.get()->GetProfilingDetails());
 
-        size_t id=0;
+        size_t id = 0;
         printer.PrintJsonChildObject(detailsObject, id);
     }
 
     // print inference object, also prints child layer and kernel measurements
-    size_t id=0;
+    size_t id = 0;
     printer.PrintJsonChildObject(inferenceObject, id);
 
     // end of ArmNN
@@ -403,9 +415,9 @@
     if (!saneMarkerSequence)
     {
         outStream << "Cannot write profiling stats. "
-            "Unexpected errors were found when analyzing the sequence of logged events, which may lead to plainly "
-            "wrong stats. The profiling system may contain implementation issues or could have been used in an "
-            "unsafe manner." << std::endl;
+                     "Unexpected errors were found when analyzing the sequence of logged events, "
+                     "which may lead to plainly wrong stats. The profiling system may contain implementation "
+                     "issues or could have been used in an unsafe manner." << std::endl;
         return;
     }
 
@@ -431,9 +443,8 @@
         std::map<const Event*, std::vector<const Event*>> descendantsMap;
         PopulateDescendants(descendantsMap);
 
-        std::function<void (const Event*, std::vector<const Event*>&)>
-            FindDescendantEvents = [&](const Event* eventPtr,
-                std::vector<const Event*>& sequence)
+        std::function<void(const Event*, std::vector<const Event*>&)>
+            FindDescendantEvents = [&](const Event* eventPtr, std::vector<const Event*>& sequence)
             {
                 sequence.push_back(eventPtr);
 
@@ -481,26 +492,31 @@
     static BackendId cpuRef("CpuRef");
     static BackendId cpuAcc("CpuAcc");
     static BackendId gpuAcc("GpuAcc");
-    if (backendId == cpuRef) {
-            // Cyan
-            return 0xffff001b;
-    } else if (backendId == cpuAcc) {
-            // Green
-            return 0x00ff001b;
-    } else if (backendId == gpuAcc) {
-            // Purple
-            return 0xff007f1b;
-    } else {
-            // Dark gray
-            return 0x5555551b;
+    if (backendId == cpuRef)
+    {
+        // Cyan
+        return 0xffff001b;
+    }
+    else if (backendId == cpuAcc)
+    {
+        // Green
+        return 0x00ff001b;
+    }
+    else if (backendId == gpuAcc)
+    {
+        // Purple
+        return 0xff007f1b;
+    }
+    else
+    {
+        // Dark gray
+        return 0x5555551b;
     }
 }
 
 // The thread_local pointer to the profiler instance.
 thread_local IProfiler* tl_Profiler = nullptr;
 
-
-
 ProfilerManager& ProfilerManager::GetInstance()
 {
     // Global reference to the single ProfileManager instance allowed.
@@ -518,7 +534,6 @@
     return tl_Profiler;
 }
 
-
 void IProfiler::EnableProfiling(bool enableProfiling)
 {
     pProfilerImpl->EnableProfiling(enableProfiling);
@@ -541,12 +556,14 @@
 
 Event* IProfiler::BeginEvent(const BackendId& backendId,
                              const std::string& label,
-                             std::vector<InstrumentPtr>&& instruments)
+                             std::vector<InstrumentPtr>&& instruments,
+                             const Optional<profiling::ProfilingGuid>& guid)
 {
-    return pProfilerImpl->BeginEvent(this, backendId, label, std::move(instruments));
+    return pProfilerImpl->BeginEvent(this, backendId, label, std::move(instruments), guid);
 }
 
 IProfiler::~IProfiler() = default;
-IProfiler::IProfiler() : pProfilerImpl(new ProfilerImpl()) {};
+IProfiler::IProfiler() : pProfilerImpl(new ProfilerImpl())
+{}
 
 } // namespace armnn
diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp
index 785f505..a336a0e 100644
--- a/src/armnn/Profiling.hpp
+++ b/src/armnn/Profiling.hpp
@@ -4,12 +4,13 @@
 //
 #pragma once
 
+#include <common/include/ProfilingGuid.hpp>
 #include "ProfilingEvent.hpp"
 #include "ProfilingDetails.hpp"
-
-#include <armnn/utility/IgnoreUnused.hpp>
 #include "armnn/IProfiler.hpp"
 
+#include <armnn/Optional.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include "WallClockTimer.hpp"
 
 #include <chrono>
@@ -37,14 +38,16 @@
     Event* BeginEvent(armnn::IProfiler* profiler,
                       const BackendId& backendId,
                       const std::string& name,
-                      std::vector<InstrumentPtr>&& instruments);
+                      std::vector<InstrumentPtr>&& instruments,
+                      const Optional<profiling::ProfilingGuid>& guid);
 
     template<typename DescriptorType>
     void AddLayerDetails(const std::string& label,
                          const DescriptorType& desc,
-                         const WorkloadInfo& infos)
+                         const WorkloadInfo& infos,
+                         const profiling::ProfilingGuid guid)
     {
-        m_ProfilingDetails->AddDetailsToString(label, desc, infos);
+        m_ProfilingDetails->AddDetailsToString(label, desc, infos, guid);
     }
 
     // Marks the end of a user-defined event.
@@ -125,7 +128,10 @@
     using InstrumentPtr = std::unique_ptr<Instrument>;
 
     template<typename... Args>
-    ScopedProfilingEvent(const BackendId& backendId, const std::string& name, Args&&... args)
+    ScopedProfilingEvent(const BackendId& backendId,
+                         const Optional<profiling::ProfilingGuid>& guid,
+                         const std::string& name,
+                         Args&& ... args)
         : m_Event(nullptr)
         , m_Profiler(ProfilerManager::GetInstance().GetProfiler())
     {
@@ -134,7 +140,7 @@
             std::vector<InstrumentPtr> instruments(0);
             instruments.reserve(sizeof...(args)); //One allocation
             ConstructNextInVector(instruments, std::forward<Args>(args)...);
-            m_Event = m_Profiler->BeginEvent(backendId, name, std::move(instruments));
+            m_Event = m_Profiler->BeginEvent(backendId, name, std::move(instruments), guid);
         }
     }
 
@@ -165,43 +171,36 @@
 };
 
 // Helper to easily add operator details during profiling.
-class ScopedProfilingUpdateDescriptions
+template<typename DescriptorType>
+inline void ProfilingUpdateDescriptions(const std::string& name,
+                                        const DescriptorType& desc,
+                                        const WorkloadInfo& infos,
+                                        const profiling::ProfilingGuid guid)
 {
-public:
-    template<typename DescriptorType>
-    ScopedProfilingUpdateDescriptions(const std::string& name, const DescriptorType& desc, const WorkloadInfo& infos)
-        : m_Profiler(ProfilerManager::GetInstance().GetProfiler())
+    IProfiler* profiler(ProfilerManager::GetInstance().GetProfiler()); // Profiler used
+    if (profiler && profiler->IsProfilingEnabled())
     {
-        if (m_Profiler && m_Profiler->IsProfilingEnabled())
-        {
-            m_Profiler->AddLayerDetails(name, desc, infos);
-        }
+        profiler->AddLayerDetails(name, desc, infos, guid);
     }
-
-    ~ScopedProfilingUpdateDescriptions()
-    {}
-
-private:
-
-    IProfiler* m_Profiler; ///< Profiler used
-};
+}
 
 template<typename DescriptorType>
 void IProfiler::AddLayerDetails(const std::string& name,
                                 const DescriptorType& desc,
-                                const WorkloadInfo& infos)
+                                const WorkloadInfo& infos,
+                                const profiling::ProfilingGuid guid)
 {
-    return pProfilerImpl->AddLayerDetails(name, desc, infos);
+    return pProfilerImpl->AddLayerDetails(name, desc, infos, guid);
 }
 
 } // namespace armnn
 
 // Event Definitions for profiling
-#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC_INNER(lineNumber, backendId, /*name,*/ ...) \
-    armnn::ScopedProfilingEvent e_ ## lineNumber(backendId, /*name,*/ __VA_ARGS__);
+#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC_INNER(lineNumber, backendId, guid, /*name,*/ ...) \
+    armnn::ScopedProfilingEvent e_ ## lineNumber(backendId, guid, /*name,*/ __VA_ARGS__);
 
-#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC(lineNumber, backendId, /*name,*/ ...) \
-    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC_INNER(lineNumber, backendId, /*name,*/ __VA_ARGS__)
+#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC(lineNumber, backendId, guid, /*name,*/ ...) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC_INNER(lineNumber, backendId, guid, /*name,*/ __VA_ARGS__)
 
 // The event name must be known at compile time i.e. if you are going to use this version of the macro
 // in code the first argument you supply after the backendId must be the name.
@@ -210,18 +209,15 @@
 //       legal and unique variable name (so long as you don't use the macro twice on the same line).
 //       The concat preprocessing operator (##) very unhelpfully will not expand macros see
 //       https://gcc.gnu.org/onlinedocs/cpp/Concatenation.html for the gory details.
-#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, /*name,*/ ...) \
-    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC(__LINE__,backendId, /*name,*/ __VA_ARGS__)
+#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, guid, /*name,*/ ...) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS_UNIQUE_LOC(__LINE__,backendId, guid, /*name,*/ __VA_ARGS__)
 
 #define ARMNN_SCOPED_PROFILING_EVENT(backendId, name) \
-    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, name, armnn::WallClockTimer())
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, armnn::EmptyOptional(), name, armnn::WallClockTimer())
+
+#define ARMNN_SCOPED_PROFILING_EVENT_GUID(backendId, name, guid) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, guid, name, armnn::WallClockTimer())
 
 // Workload Description definitons for profiling
-#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC_UNIQUE_LOC_INNER(lineNumber, name, desc, infos) \
-    armnn::ScopedProfilingUpdateDescriptions e_ ## lineNumber(name, desc, infos);
-
-#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC_UNIQUE_LOC(lineNumber, name, desc, infos) \
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC_UNIQUE_LOC_INNER(lineNumber, name, desc, infos)
-
-#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos) \
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC_UNIQUE_LOC(__LINE__, name, desc, infos)
+#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid) \
+    armnn::ProfilingUpdateDescriptions(name, desc, infos, guid);
diff --git a/src/armnn/ProfilingDetails.hpp b/src/armnn/ProfilingDetails.hpp
index 7224aad..2f9a8f0 100644
--- a/src/armnn/ProfilingDetails.hpp
+++ b/src/armnn/ProfilingDetails.hpp
@@ -33,11 +33,17 @@
     template<typename DescriptorType>
     void AddDetailsToString(const std::string& workloadName,
                             const DescriptorType& desc,
-                            const WorkloadInfo& infos)
+                            const WorkloadInfo& infos,
+                            const profiling::ProfilingGuid guid)
     {
         m_ProfilingDetails << std::quoted("Name") << ": " << std::quoted(workloadName) << " ";
         PrintHeader();
 
+        PrintTabs();
+        m_ProfilingDetails << std::quoted("GUID") << ": " << std::quoted(std::to_string(guid));
+        PrintSeparator();
+        PrintNewLine();
+
         // Print tensor infos and related data types
         PrintInfos(infos.m_InputTensorInfos, "Input");
 
diff --git a/src/armnn/ProfilingEvent.cpp b/src/armnn/ProfilingEvent.cpp
index 1ba9789..31d4b91 100644
--- a/src/armnn/ProfilingEvent.cpp
+++ b/src/armnn/ProfilingEvent.cpp
@@ -12,12 +12,14 @@
              IProfiler* profiler,
              Event* parent,
              const BackendId backendId,
-             std::vector<InstrumentPtr>&& instruments)
+             std::vector<InstrumentPtr>&& instruments,
+             const Optional<profiling::ProfilingGuid> guid)
     : m_EventName(eventName)
     , m_Profiler(profiler)
     , m_Parent(parent)
     , m_BackendId(backendId)
     , m_Instruments(std::move(instruments))
+    , m_ProfilingGuid(guid)
 {
 }
 
@@ -27,7 +29,7 @@
     , m_Parent(other.m_Parent)
     , m_BackendId(other.m_BackendId)
     , m_Instruments(std::move(other.m_Instruments))
-
+    , m_ProfilingGuid(other.m_ProfilingGuid)
 {
 }
 
@@ -84,6 +86,12 @@
     return m_BackendId;
 }
 
+Optional<profiling::ProfilingGuid> Event::GetProfilingGuid() const
+{
+    return m_ProfilingGuid;
+}
+
+
 Event& Event::operator=(Event&& other) noexcept
 {
     if (this == &other)
@@ -95,6 +103,7 @@
     m_Profiler = other.m_Profiler;
     m_Parent = other.m_Parent;
     m_BackendId = other.m_BackendId;
+    m_ProfilingGuid = other.m_ProfilingGuid;
     other.m_Profiler = nullptr;
     other.m_Parent = nullptr;
     return *this;
diff --git a/src/armnn/ProfilingEvent.hpp b/src/armnn/ProfilingEvent.hpp
index 7afd5c1..b0d5be7 100644
--- a/src/armnn/ProfilingEvent.hpp
+++ b/src/armnn/ProfilingEvent.hpp
@@ -9,6 +9,10 @@
 #include <vector>
 #include <chrono>
 #include <memory>
+
+#include <common/include/ProfilingGuid.hpp>
+#include <armnn/Optional.hpp>
+
 #include "Instrument.hpp"
 #include "armnn/Types.hpp"
 
@@ -30,7 +34,8 @@
           IProfiler* profiler,
           Event* parent,
           const BackendId backendId,
-          std::vector<InstrumentPtr>&& instrument);
+          std::vector<InstrumentPtr>&& instrument,
+          const Optional<profiling::ProfilingGuid> guid);
 
     Event(const Event& other) = delete;
 
@@ -66,6 +71,10 @@
     /// \return Backend id of the event
     BackendId GetBackendId() const;
 
+    /// Get the associated profiling GUID if the event is a workload
+    /// \return Optional GUID of the event
+    Optional<profiling::ProfilingGuid> GetProfilingGuid() const;
+
     /// Assignment operator
     Event& operator=(const Event& other) = delete;
 
@@ -87,6 +96,9 @@
 
     /// Instruments to use
     Instruments m_Instruments;
+
+    /// Workload Profiling id
+    Optional<profiling::ProfilingGuid> m_ProfilingGuid;
 };
 
 } // namespace armnn
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index b27ad5a..8fd9717 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -4,16 +4,13 @@
 //
 
 #include <armnn/IRuntime.hpp>
-#include <armnn/TypesUtils.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 
 #include <doctest/doctest.h>
 
-#include <memory>
 #include <thread>
-#include <ostream>
 
 #include <Profiling.hpp>
+#include <armnn/Optional.hpp>
 
 namespace armnn
 {
@@ -216,7 +213,10 @@
             // Need to directly create a ScopedProfilingEvent as the one created by the macro falls out of scope
             // immediately causing the Event.Stop() function method to be called immediately after the Event.Start()
             // function resulting in periodic test failures on the Dent and Smith HiKeys
-            armnn::ScopedProfilingEvent testEvent(armnn::Compute::CpuAcc, "test", armnn::WallClockTimer());
+            armnn::ScopedProfilingEvent testEvent(armnn::Compute::CpuAcc,
+                                                  armnn::EmptyOptional(),
+                                                  "test",
+                                                  armnn::WallClockTimer());
             std::this_thread::sleep_for(std::chrono::milliseconds(10));
         }
 
@@ -288,18 +288,33 @@
 
     {
         // Test scoped macro.
-        ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, "EnqueueWorkload", TestInstrument())
-        ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, "Level 0", TestInstrument())
+        ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc,
+                                                      armnn::EmptyOptional(),
+                                                      "EnqueueWorkload",
+                                                      TestInstrument())
+        ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc,
+                                                      armnn::EmptyOptional(),
+                                                      "Level 0",
+                                                      TestInstrument())
         {
             {
-                ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, "Level 1A", TestInstrument())
+                ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc,
+                                                              armnn::EmptyOptional(),
+                                                              "Level 1A",
+                                                              TestInstrument())
             }
 
             {
-                ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, "Level 1B", TestInstrument())
+                ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc,
+                                                              armnn::EmptyOptional(),
+                                                              "Level 1B",
+                                                              TestInstrument())
 
                 {
-                    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, "Level 2A", TestInstrument())
+                    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc,
+                                                                  armnn::EmptyOptional(),
+                                                                  "Level 2A",
+                                                                  TestInstrument())
                 }
             }
         }
diff --git a/src/armnn/test/ProfilingEventTest.cpp b/src/armnn/test/ProfilingEventTest.cpp
index 1e3d1ea..c61f4e2 100644
--- a/src/armnn/test/ProfilingEventTest.cpp
+++ b/src/armnn/test/ProfilingEventTest.cpp
@@ -27,7 +27,8 @@
                     nullptr,
                     nullptr,
                     BackendId(),
-                    std::move(insts1));
+                    std::move(insts1),
+                    EmptyOptional());
 
     CHECK_EQ(testEvent.GetName(), "EventName");
 
@@ -50,7 +51,8 @@
                      profileManager.GetProfiler(),
                      &testEvent,
                      cpuAccBackendId,
-                     std::move(insts2));
+                     std::move(insts2),
+                     EmptyOptional());
 
     CHECK_EQ(&testEvent, testEvent2.GetParentEvent());
     CHECK_EQ(profileManager.GetProfiler(), testEvent2.GetProfiler());
@@ -70,7 +72,8 @@
                     nullptr,
                     nullptr,
                     BackendId(),
-                    std::move(insts1));
+                    std::move(insts1),
+                    EmptyOptional());
 
     CHECK_EQ(testEvent.GetName(), "GPUEvent");
 
@@ -93,7 +96,8 @@
                      profileManager.GetProfiler(),
                      &testEvent,
                      gpuAccBackendId,
-                     std::move(insts2));
+                     std::move(insts2),
+                     EmptyOptional());
 
     CHECK_EQ(&testEvent, testEvent2.GetParentEvent());
     CHECK_EQ(profileManager.GetProfiler(), testEvent2.GetProfiler());
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index b3df7ce..ab9d5bc 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -70,7 +70,6 @@
     : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
     , m_ConvolutionLayer(memoryManager)
 {
-    // todo: check tensor shapes match.
     const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
 
     m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
@@ -121,21 +120,22 @@
                                                   isFastMathEnabled);
 
      // Add details for profiling output
-    std::string workloadName = "ClConvolution2dWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-
     WorkloadInfo detailsInfo;
 
     detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
     detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
     detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
-    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString());
+    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString(m_ConvolutionMethod));
     if (descriptor.m_Parameters.m_BiasEnabled)
     {
         detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
     }
 
     // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC(workloadName, descriptor.m_Parameters, detailsInfo);
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClConvolution2dWorkload_Execute_Guid",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
 
     InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);
 
@@ -152,7 +152,7 @@
 
 void ClConvolution2dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClConvolution2dWorkload_Execute", this->GetGuid());
     RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
 
@@ -161,23 +161,6 @@
     return m_ConvolutionMethod;
 }
 
-std::string ClConvolution2dWorkload::GetConvolutionMethodString()
-{
-    switch ( m_ConvolutionMethod )
-    {
-        case arm_compute::ConvolutionMethod::FFT:
-            return "FFT";
-        case arm_compute::ConvolutionMethod::DIRECT:
-            return "Direct";
-        case arm_compute::ConvolutionMethod::GEMM:
-            return "GEMM";
-        case arm_compute::ConvolutionMethod::WINOGRAD:
-            return "Winograd";
-        default:
-            return "Unknown";
-    }
-}
-
 void ClConvolution2dWorkload::FreeUnusedTensors()
 {
     FreeTensorIfUnused(m_KernelTensor);
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 49d7f77..d0f7a5b 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -37,7 +37,6 @@
     void Execute() const override;
 
     arm_compute::ConvolutionMethod GetConvolutionMethod() const;
-    std::string GetConvolutionMethodString();
 
 private:
     mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 467505d..41b97c1 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -19,6 +19,14 @@
 
 #define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
+                                                  armnn::EmptyOptional(), \
+                                                  name, \
+                                                  armnn::OpenClTimer(), \
+                                                  armnn::WallClockTimer())
+
+#define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \
+                                                  guid, \
                                                   name, \
                                                   armnn::OpenClTimer(), \
                                                   armnn::WallClockTimer())
@@ -26,6 +34,23 @@
 namespace armnn
 {
 
+inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod convolutionMethod)
+{
+    switch (convolutionMethod)
+    {
+        case arm_compute::ConvolutionMethod::FFT:
+            return "FFT";
+        case arm_compute::ConvolutionMethod::DIRECT:
+            return "Direct";
+        case arm_compute::ConvolutionMethod::GEMM:
+            return "GEMM";
+        case arm_compute::ConvolutionMethod::WINOGRAD:
+            return "Winograd";
+        default:
+            return "Unknown";
+    }
+}
+
 template <typename T>
 void CopyArmComputeClTensorData(arm_compute::CLTensor& dstTensor, const T* srcData)
 {
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 1e12e13..a6ae99b 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -119,21 +119,22 @@
                                                  isFastMathEnabled);
 
     // Add details for profiling output
-    std::string workloadName = "NeonConvolution2dWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-
     WorkloadInfo detailsInfo;
 
     detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
     detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
     detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Weight->GetTensorInfo());
-    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString());
+    detailsInfo.m_ConvolutionMethod = armnn::Optional<std::string>(GetConvolutionMethodString(m_ConvolutionMethod));
     if (descriptor.m_Parameters.m_BiasEnabled)
     {
         detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(descriptor.m_Bias->GetTensorInfo());
     }
 
     // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC(workloadName, descriptor.m_Parameters, detailsInfo);
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonConvolution2dWorkload_Execute",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
 
@@ -152,7 +153,7 @@
 
 void NeonConvolution2dWorkload::Execute() const
 {
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvolution2dWorkload_Execute");
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution2dWorkload_Execute", this->GetGuid());
     m_ConvolutionLayer->run();
 }
 
@@ -161,23 +162,6 @@
     return m_ConvolutionMethod;
 }
 
-std::string NeonConvolution2dWorkload::GetConvolutionMethodString()
-{
-    switch ( m_ConvolutionMethod )
-    {
-        case arm_compute::ConvolutionMethod::FFT:
-            return "FFT";
-        case arm_compute::ConvolutionMethod::DIRECT:
-            return "Direct";
-        case arm_compute::ConvolutionMethod::GEMM:
-            return "GEMM";
-        case arm_compute::ConvolutionMethod::WINOGRAD:
-            return "Winograd";
-        default:
-            return "Unknown";
-    }
-}
-
 void NeonConvolution2dWorkload::FreeUnusedTensors()
 {
     FreeTensorIfUnused(m_KernelTensor);
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
index 4b4c07a..4b6e58c 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
@@ -37,7 +37,6 @@
     void Execute() const override;
 
     arm_compute::ConvolutionMethod GetConvolutionMethod() const;
-    std::string GetConvolutionMethodString();
 
 private:
     std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index ab7616f..1199f30 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -16,6 +16,14 @@
 
 #define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
+                                                  armnn::EmptyOptional(), \
+                                                  name, \
+                                                  armnn::NeonTimer(), \
+                                                  armnn::WallClockTimer())
+
+#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
+    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
+                                                  guid, \
                                                   name, \
                                                   armnn::NeonTimer(), \
                                                   armnn::WallClockTimer())
@@ -25,6 +33,23 @@
 namespace armnn
 {
 
+inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod convolutionMethod)
+{
+    switch (convolutionMethod)
+    {
+        case arm_compute::ConvolutionMethod::FFT:
+            return "FFT";
+        case arm_compute::ConvolutionMethod::DIRECT:
+            return "Direct";
+        case arm_compute::ConvolutionMethod::GEMM:
+            return "GEMM";
+        case arm_compute::ConvolutionMethod::WINOGRAD:
+            return "Winograd";
+        default:
+            return "Unknown";
+    }
+}
+
 template <typename T>
 void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
 {
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 7c33171..b0b88b1 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -16,9 +16,6 @@
     const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
     : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
 {
-    // Construct params for reporting operator details
-    std::string workloadName = "RefConvolution2dWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-
     WorkloadInfo detailsInfo;
     detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
     detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
@@ -29,7 +26,10 @@
     }
 
     // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC(workloadName, descriptor.m_Parameters, detailsInfo);
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("RefConvolution2dWorkload_Execute",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
 
     m_Weight = std::make_unique<ScopedTensorHandle>(*( descriptor.m_Weight ));
     const TensorInfo& rFilterInfo = m_Weight->GetTensorInfo();
@@ -57,8 +57,7 @@
 
 void RefConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
 {
-    std::string workloadName = "RefConvolutionWorkload_Execute_Guid" + std::to_string(this->GetGuid());
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, workloadName);
+    ARMNN_SCOPED_PROFILING_EVENT_GUID(Compute::CpuRef, "RefConvolution2dWorkload_Execute", this->GetGuid());
 
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());