MLCE-1092 Added layerNames to classic delegate

 * All layers added through the classic delegate will have a name that
   includes the nodeIndex from the tflite model.
 * Added utilities to ClassicDelegateUtils to get the names for the
   layers (a rough sketch of such a helper is shown below).

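As a rough illustration only (the exact helper added to
ClassicDelegateUtils may differ in signature and format), the new
utility is expected to look something like this, assuming names of the
form "<LayerType>:<nodeIndex>" with an optional suffix:

    // Sketch only - not the exact code added to ClassicDelegateUtils.hpp.
    // Assumes armnn::GetLayerTypeAsCString() is used for the enum-to-string
    // step; the real naming scheme may differ.
    #include <armnn/Types.hpp>
    #include <string>

    std::string GetLayerName(armnn::LayerType layerType,
                             int nodeIndex,
                             const std::string& suffix = "")
    {
        // Produces e.g. "Convolution2d:7" or "Constant:7:Filter"
        std::string layerName = armnn::GetLayerTypeAsCString(layerType);
        layerName += ":" + std::to_string(nodeIndex);
        if (!suffix.empty())
        {
            layerName += ":" + suffix;
        }
        return layerName;
    }

Call sites then pass layerName.c_str() into the Add*Layer overloads that
accept a name, as in the diff below; FusedActivation also gains a
nodeIndex parameter so the fused activation layer can be named the same
way.
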
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Iac567486d1f91c0a99b77ed8963f6b6ca26b0b59
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
index a44f9ee..cf0134e 100644
--- a/delegate/classic/src/Convolution.hpp
+++ b/delegate/classic/src/Convolution.hpp
@@ -131,14 +131,16 @@
     }
 
     // Set up filter and biases
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+    auto layerName = GetLayerName(armnn::LayerType::Convolution2d, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
 
-    if(filterTensorInfo.IsConstant())
+    if (filterTensorInfo.IsConstant())
     {
         auto filter = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]], filterTensorInfo);
 
-        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
         weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
         weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
     }
@@ -149,7 +151,10 @@
         if(biasTensorInfo.IsConstant())
         {
             auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
-            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+            auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+                                                                                           biasName.c_str());
             ARMNN_ASSERT(biasLayer != nullptr);
             biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
             biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -157,11 +162,12 @@
     }
 
     // The data input can also be constant, so we must check that this is also allocated to an input slot
-    if(inputTensorInfo.IsConstant())
+    if (inputTensorInfo.IsConstant())
     {
         auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
 
-        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }
@@ -183,7 +189,7 @@
     }
 
     // Check and Create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
 }
 
 // Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
@@ -311,7 +317,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer =  delegateData.m_Network->AddConvolution3dLayer(descriptor);
+    auto layerName = GetLayerName(armnn::LayerType::Convolution3d, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -321,7 +328,8 @@
     {
         auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
 
-        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
         ARMNN_ASSERT(weightsLayer != nullptr);
 
         weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
@@ -335,7 +343,8 @@
         {
             auto biases = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
 
-            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
+            auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases, biasName.c_str());
             ARMNN_ASSERT(biasLayer != nullptr);
 
             biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
@@ -348,7 +357,8 @@
     {
         auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
 
-        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }
@@ -368,7 +378,7 @@
     }
 
     // Check and create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
 }
 #endif
 
@@ -485,7 +495,9 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+    auto layerName = GetLayerName(armnn::LayerType::DepthwiseConvolution2d, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
+                                                                                             layerName.c_str());
     layer->SetBackendId(setBackend);
 
     if(filterTensorInfo.IsConstant())
@@ -493,7 +505,8 @@
         // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
         auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
 
-        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
         weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
         weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
     }
@@ -504,7 +517,10 @@
         if(biasTensorInfo.IsConstant())
         {
             auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
-            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+            auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+                                                                                           biasName.c_str());
             ARMNN_ASSERT(biasLayer != nullptr);
             biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
             biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -516,7 +532,8 @@
     {
         auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
 
-        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }
@@ -537,7 +554,7 @@
         return kTfLiteOk;
     }
     // Check and create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
 }
 
 TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
@@ -683,9 +700,11 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
+    auto layerName = GetLayerName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                              filterTensor,
-                                                                                             armnn::EmptyOptional());
+                                                                                             armnn::EmptyOptional(),
+                                                                                             layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -694,7 +713,8 @@
     {
         auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]], inputTensorInfo);
 
-        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }