IVGCVSW-2881 Remove DebugDescriptor

* Also update Debug layer to use layer guid information

Change-Id: I9ec1f639299c3f855b670ff031a0e88d685cfc6b
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 12b3c40..5916488 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -171,11 +171,10 @@
 bool IsDebugSupported(const BackendId& backend,
                       const TensorInfo& input,
                       const TensorInfo& output,
-                      const DebugDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
 }
 
 bool IsDepthwiseConvolutionSupported(const BackendId& backend,
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index a7f89ff..126b56b 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -86,15 +86,12 @@
     debugLayers.reserve(layer.GetNumOutputSlots());
 
     // Connect a DebugLayer to each output slot of the layer
-    unsigned int outputSlotIndex = 0u;
     for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
     {
-        const std::string layerName(layer.GetName());
-        const std::string debugName = std::string("DebugLayerAfter") + layerName;
+        const std::string debugName = std::string("DebugLayerAfter") + layer.GetNameStr();
 
-        const DebugDescriptor descriptor(layerName, outputSlotIndex++);
         DebugLayer* debugLayer =
-            graph.InsertNewLayer<DebugLayer>(*outputSlot, descriptor, debugName.c_str());
+            graph.InsertNewLayer<DebugLayer>(*outputSlot, debugName.c_str());
 
         // Sets output tensor info for the debug layer.
         TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 6fccca6..3491273 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -12,23 +12,26 @@
 namespace armnn
 {
 
-DebugLayer::DebugLayer(const DebugDescriptor& param, const char* name)
-    : LayerWithParameters(1, 1, LayerType::Debug, param, name)
+DebugLayer::DebugLayer(const char* name)
+    : Layer(1, 1, LayerType::Debug, name)
 {}
 
 std::unique_ptr<IWorkload> DebugLayer::CreateWorkload(const Graph& graph,
                                                       const IWorkloadFactory& factory) const
 {
+    const Layer& prevLayer = GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+
     DebugQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_LayerName = m_Param.m_LayerName;
-    descriptor.m_Parameters.m_SlotIndex = m_Param.m_SlotIndex;
+    descriptor.m_Guid = prevLayer.GetGuid();
+    descriptor.m_LayerName = prevLayer.GetNameStr();
+    descriptor.m_SlotIndex = GetInputSlot(0).GetConnectedOutputSlot()->CalculateIndexOnOwner();
 
     return factory.CreateDebug(descriptor, PrepInfoAndDesc(descriptor, graph));
 }
 
 DebugLayer* DebugLayer::Clone(Graph& graph) const
 {
-    return CloneBase<DebugLayer>(graph, m_Param, GetName());
+    return CloneBase<DebugLayer>(graph, GetName());
 }
 
 void DebugLayer::ValidateTensorShapesFromInputs()
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index bc64541..3bd5a3d 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -4,13 +4,13 @@
 //
 #pragma once
 
-#include "LayerWithParameters.hpp"
+#include "Layer.hpp"
 
 namespace armnn
 {
 
 /// This layer visualizes the data flowing through the network.
-class DebugLayer : public LayerWithParameters<DebugDescriptor>
+class DebugLayer : public Layer
 {
 public:
     /// Makes a workload for the Debug type.
@@ -32,9 +32,8 @@
 
 protected:
     /// Constructor to create a DebugLayer.
-    /// @param [in] param DebugDescriptor to configure the debug layer.
     /// @param [in] name Optional name for the layer.
-    DebugLayer(const DebugDescriptor& param, const char* name);
+    DebugLayer(const char* name);
 
     /// Default destructor
     ~DebugLayer() = default;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 6358f6f..c170ac0 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -100,7 +100,6 @@
 
 bool LayerSupportBase::IsDebugSupported(const TensorInfo& input,
                                         const TensorInfo& output,
-                                        const DebugDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index bf81459..75c366c 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -58,7 +58,6 @@
 
     bool IsDebugSupported(const TensorInfo& input,
                           const TensorInfo& output,
-                          const DebugDescriptor& descriptor,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 09f5647..18bd921 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -380,9 +380,13 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
-struct DebugQueueDescriptor : QueueDescriptorWithParameters<DebugDescriptor>
+struct DebugQueueDescriptor : QueueDescriptor
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
+
+    LayerGuid m_Guid = 0;
+    std::string m_LayerName;
+    unsigned int m_SlotIndex = 0;
 };
 
 struct RsqrtQueueDescriptor : QueueDescriptor
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 72d0b19..0996a8a 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -192,14 +192,11 @@
         }
         case LayerType::Debug:
         {
-            auto cLayer = boost::polymorphic_downcast<const DebugLayer*>(&layer);
-
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
 
             result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
-                                                          cLayer->GetParameters(),
                                                           reason);
             break;
         }
diff --git a/src/backends/backendsCommon/test/DebugTestImpl.hpp b/src/backends/backendsCommon/test/DebugTestImpl.hpp
index 14808f4..4af479d 100644
--- a/src/backends/backendsCommon/test/DebugTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DebugTestImpl.hpp
@@ -92,8 +92,9 @@
     unsigned int outputShape[] = {1, 2, 2, 3};
 
     armnn::DebugQueueDescriptor desc;
-    desc.m_Parameters.m_LayerName = "TestOutput";
-    desc.m_Parameters.m_SlotIndex = 1;
+    desc.m_Guid = 1;
+    desc.m_LayerName = "TestOutput";
+    desc.m_SlotIndex = 0;
 
     inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
     outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
@@ -115,8 +116,9 @@
     });
 
     const std::string expectedStringOutput =
-        "{ \"layer\": \"TestOutput\","
-        " \"outputSlot\": 1,"
+        "{ \"layerGuid\": 1,"
+        " \"layerName\": \"TestOutput\","
+        " \"outputSlot\": 0,"
         " \"shape\": [1, 2, 2, 3],"
         " \"min\": 1, \"max\": 12,"
         " \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
@@ -143,7 +145,9 @@
     unsigned int outputShape[] = {3, 3, 1};
 
     armnn::DebugQueueDescriptor desc;
-    desc.m_Parameters.m_LayerName = "TestOutput";
+    desc.m_Guid = 1;
+    desc.m_LayerName = "TestOutput";
+    desc.m_SlotIndex = 0;
 
     inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
     outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
@@ -163,7 +167,8 @@
     });
 
     const std::string expectedStringOutput =
-        "{ \"layer\": \"TestOutput\","
+        "{ \"layerGuid\": 1,"
+        " \"layerName\": \"TestOutput\","
         " \"outputSlot\": 0,"
         " \"shape\": [3, 3, 1],"
         " \"min\": 1, \"max\": 9,"
@@ -191,7 +196,9 @@
     unsigned int outputShape[] = {2, 2};
 
     armnn::DebugQueueDescriptor desc;
-    desc.m_Parameters.m_LayerName = "TestOutput";
+    desc.m_Guid = 1;
+    desc.m_LayerName = "TestOutput";
+    desc.m_SlotIndex = 0;
 
     inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
     outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
@@ -209,7 +216,8 @@
     });
 
     const std::string expectedStringOutput =
-        "{ \"layer\": \"TestOutput\","
+        "{ \"layerGuid\": 1,"
+        " \"layerName\": \"TestOutput\","
         " \"outputSlot\": 0,"
         " \"shape\": [2, 2],"
         " \"min\": 1, \"max\": 4,"
@@ -237,7 +245,9 @@
     unsigned int outputShape[] = {4};
 
     armnn::DebugQueueDescriptor desc;
-    desc.m_Parameters.m_LayerName = "TestOutput";
+    desc.m_Guid = 1;
+    desc.m_LayerName = "TestOutput";
+    desc.m_SlotIndex = 0;
 
     inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType);
     outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType);
@@ -253,7 +263,8 @@
     });
 
     const std::string expectedStringOutput =
-        "{ \"layer\": \"TestOutput\","
+        "{ \"layerGuid\": 1,"
+        " \"layerName\": \"TestOutput\","
         " \"outputSlot\": 0,"
         " \"shape\": [4],"
         " \"min\": 1, \"max\": 4,"
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 3df7183..79213c1 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -332,7 +332,7 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(MemCopy)
 
-DECLARE_LAYER_POLICY_2_PARAM(Debug)
+DECLARE_LAYER_POLICY_1_PARAM(Debug)
 
 DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
 
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index d89e548..820f36b 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -354,11 +354,9 @@
 
 bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
                                        const TensorInfo& output,
-                                       const DebugDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
     ignore_unused(output);
-    ignore_unused(descriptor);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 3b73f22..c0d7fcf 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -56,7 +56,6 @@
 
     bool IsDebugSupported(const TensorInfo& input,
                           const TensorInfo& output,
-                          const DebugDescriptor& descriptor,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
diff --git a/src/backends/reference/workloads/Debug.cpp b/src/backends/reference/workloads/Debug.cpp
index cc83c7b..b263db6 100644
--- a/src/backends/reference/workloads/Debug.cpp
+++ b/src/backends/reference/workloads/Debug.cpp
@@ -16,9 +16,11 @@
 template <typename T>
 void Debug(const TensorInfo& inputInfo,
            const TensorInfo& outputInfo,
-           const DebugDescriptor& descriptor,
            const T* inputData,
-           T* outputData)
+           T* outputData,
+           LayerGuid guid,
+           const std::string& layerName,
+           unsigned int slotIndex)
 {
     const unsigned int numDims = inputInfo.GetNumDimensions();
     const unsigned int numElements = inputInfo.GetNumElements();
@@ -33,8 +35,9 @@
     }
 
     std::cout << "{ ";
-    std::cout << "\"layer\": \"" << descriptor.m_LayerName << "\", ";
-    std::cout << "\"outputSlot\": " << descriptor.m_SlotIndex << ", ";
+    std::cout << "\"layerGuid\": " << guid << ", ";
+    std::cout << "\"layerName\": \"" << layerName << "\", ";
+    std::cout << "\"outputSlot\": " << slotIndex << ", ";
     std::cout << "\"shape\": ";
 
     std::cout << "[";
@@ -89,13 +92,18 @@
 
 template void Debug<float>(const TensorInfo& inputInfo,
                            const TensorInfo& outputInfo,
-                           const DebugDescriptor& descriptor,
                            const float* inputData,
-                           float* outputData);
+                           float* outputData,
+                           LayerGuid guid,
+                           const std::string& layerName,
+                           unsigned int slotIndex);
 
 template void Debug<uint8_t>(const TensorInfo& inputInfo,
                              const TensorInfo& outputInfo,
-                             const DebugDescriptor& descriptor,
                              const uint8_t* inputData,
-                             uint8_t* outputData);
+                             uint8_t* outputData,
+                             LayerGuid guid,
+                             const std::string& layerName,
+                             unsigned int slotIndex);
+
 } // namespace armnn
diff --git a/src/backends/reference/workloads/Debug.hpp b/src/backends/reference/workloads/Debug.hpp
index 682f0bd..29a7d40 100644
--- a/src/backends/reference/workloads/Debug.hpp
+++ b/src/backends/reference/workloads/Debug.hpp
@@ -4,7 +4,6 @@
 //
 #pragma once
 
-#include <armnn/Descriptors.hpp>
 #include <armnn/Tensor.hpp>
 
 namespace armnn
@@ -13,8 +12,10 @@
 template <typename T>
 void Debug(const TensorInfo& inputInfo,
            const TensorInfo& outputInfo,
-           const DebugDescriptor& descriptor,
            const T* inputData,
-           T* outputData);
+           T* outputData,
+           LayerGuid guid,
+           const std::string& layerName,
+           unsigned int slotIndex);
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index d9a47c0..412d399 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -25,7 +25,7 @@
     const T* inputData = GetInputTensorData<T>(0, m_Data);
     T* outputData = GetOutputTensorData<T>(0, m_Data);
 
-    Debug(inputInfo, outputInfo, m_Data.m_Parameters, inputData, outputData);
+    Debug(inputInfo, outputInfo, inputData, outputData, m_Data.m_Guid, m_Data.m_LayerName, m_Data.m_SlotIndex);
 }
 
 template class RefDebugWorkload<DataType::Float32>;