MLCE-1092 Added layerNames to opaque delegate

 * All layers added through the opaque delegate will have a name that
   includes the nodeIndex from the TfLite model.
 * Added utilities to OpaqueDelegateUtils to get the names for the layers.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Iadcc21646d0b6fcc2c524d6239211ad3af6b6577
diff --git a/delegate/opaque/src/Activation.hpp b/delegate/opaque/src/Activation.hpp
index f566090..dd9c2f6 100644
--- a/delegate/opaque/src/Activation.hpp
+++ b/delegate/opaque/src/Activation.hpp
@@ -188,14 +188,16 @@
                                           outputTensorInfo,
                                           activationDesc);
     }
-    armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
+    auto layerName = GetName(activationDesc.m_Function, nodeIndex);
+    armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc,
+                                                                                           layerName.c_str());
     ARMNN_ASSERT(activationLayer != nullptr);
 
     armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(activationLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(activationLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/ArgMinMax.hpp b/delegate/opaque/src/ArgMinMax.hpp
index e549902..5ea7aa8 100644
--- a/delegate/opaque/src/ArgMinMax.hpp
+++ b/delegate/opaque/src/ArgMinMax.hpp
@@ -144,7 +144,8 @@
     }
 
     // Add an ArgMinMax layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+    auto layerName = GetName(desc.m_Function, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -152,7 +153,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/BatchMatMul.hpp b/delegate/opaque/src/BatchMatMul.hpp
index 5261fbd..257c410 100644
--- a/delegate/opaque/src/BatchMatMul.hpp
+++ b/delegate/opaque/src/BatchMatMul.hpp
@@ -102,7 +102,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::BatchMatMul, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -110,7 +111,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/BatchSpace.hpp b/delegate/opaque/src/BatchSpace.hpp
index c760a14..00e2705 100644
--- a/delegate/opaque/src/BatchSpace.hpp
+++ b/delegate/opaque/src/BatchSpace.hpp
@@ -119,7 +119,8 @@
     }
 
     // Add a BatchToSpace layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::BatchToSpaceNd, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -127,7 +128,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
@@ -244,7 +245,8 @@
     }
 
     // Add a SpaceToBatch layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::SpaceToBatchNd, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -252,7 +254,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Comparison.hpp b/delegate/opaque/src/Comparison.hpp
index 8740cfb..026a43a 100644
--- a/delegate/opaque/src/Comparison.hpp
+++ b/delegate/opaque/src/Comparison.hpp
@@ -10,7 +10,7 @@
 namespace armnnOpaqueDelegate
 {
 
-std::string GetLayerName(armnn::ComparisonOperation comparisonOperation)
+std::string GetOperationName(armnn::ComparisonOperation comparisonOperation)
 {
     std::string layerName = "COMPARISON";
     switch (comparisonOperation)
@@ -123,11 +123,13 @@
 
     if (!delegateData.m_Network)
     {
-        validateFunc(outputTensorInfo, isSupported, GetLayerName(comparisonOperation));
+        validateFunc(outputTensorInfo, isSupported, GetOperationName(comparisonOperation));
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+    auto layerName = GetName(descriptor.m_Operation, nodeIndex);
+    armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor,
+                                                                                           layerName.c_str());
     comparisonLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(comparisonLayer != nullptr);
 
@@ -135,7 +137,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(comparisonLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(comparisonLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Control.hpp b/delegate/opaque/src/Control.hpp
index bcf3c33..9aef838 100644
--- a/delegate/opaque/src/Control.hpp
+++ b/delegate/opaque/src/Control.hpp
@@ -140,7 +140,9 @@
     }
 
     // Setup layer and connect.
-    armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+    auto layerName = GetName(armnn::LayerType::Concat, nodeIndex);
+    armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor,
+                                                                                          layerName.c_str());
     concatenationLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(concatenationLayer != nullptr);
 
@@ -148,7 +150,8 @@
     auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
                                               delegateData,
                                               tfLiteContext,
-                                              tfLiteNode);
+                                              tfLiteNode,
+                                              nodeIndex);
     if (inputsTensorsProcess == kTfLiteError)
     {
         return inputsTensorsProcess;
@@ -168,7 +171,7 @@
     }
 
     // Check and Create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData, nodeIndex);
 }
 
 TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
@@ -272,7 +275,8 @@
     }
 
     // Setup layer and connect.
-    armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+    auto layerName = GetName(armnn::LayerType::Mean, nodeIndex);
+    armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc, layerName.c_str());
     meanLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(meanLayer != nullptr);
 
@@ -280,7 +284,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(meanLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Convolution.hpp b/delegate/opaque/src/Convolution.hpp
index 2eb5eda..384c62b 100644
--- a/delegate/opaque/src/Convolution.hpp
+++ b/delegate/opaque/src/Convolution.hpp
@@ -154,14 +154,16 @@
     }
 
     // Set up filter and biases
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::Convolution2d, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
 
     if(filterTensorInfo.IsConstant())
     {
         auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);
 
-        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
         weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
         weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
     }
@@ -171,7 +173,10 @@
         if (biasTensorInfo.IsConstant())
         {
             auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
-            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+            auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
+            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+                                                                                           biasName.c_str());
             ARMNN_ASSERT(biasLayer != nullptr);
             biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
             biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -183,7 +188,8 @@
     {
         auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
 
-        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }
@@ -205,7 +211,7 @@
     }
 
     // Check and Create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
 }
 
 TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
@@ -348,7 +354,9 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::DepthwiseConvolution2d, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
+                                                                                             layerName.c_str());
     layer->SetBackendId(setBackend);
 
     if(filterTensorInfo.IsConstant())
@@ -356,7 +364,8 @@
         // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
         auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);
 
-        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
         weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
         weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
     }
@@ -367,7 +376,9 @@
         {
             auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
 
-            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+            auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
+            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+                                                                                           biasName.c_str());
             ARMNN_ASSERT(biasLayer != nullptr);
             biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
             biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -379,7 +390,8 @@
     {
         auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
 
-        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }
@@ -400,7 +412,7 @@
         return kTfLiteOk;
     }
     // Check and create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
 }
 
 TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
@@ -552,7 +564,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer =  delegateData.m_Network->AddConvolution3dLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::Convolution3d, nodeIndex);
+    armnn::IConnectableLayer* layer =  delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -563,7 +576,8 @@
         auto filter = CreateConstTensor(tfLiteFilterTensor,
                                         filterTensorInfo);
 
-        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
         ARMNN_ASSERT(weightsLayer != nullptr);
 
         weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
@@ -576,7 +590,9 @@
         {
             auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
 
-            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+            auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
+            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+                                                                                           biasName.c_str());
             ARMNN_ASSERT(biasLayer != nullptr);
 
             biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
@@ -589,7 +605,8 @@
     {
         auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
 
-        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }
@@ -609,7 +626,7 @@
     }
 
     // Check and create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
 }
 
 
@@ -781,9 +798,11 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
+    auto layerName = GetName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                              filterTensor,
-                                                                                             armnn::EmptyOptional());
+                                                                                             armnn::EmptyOptional(),
+                                                                                             layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -792,7 +811,8 @@
     {
         auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
 
-        armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+        armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
         inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
         inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     }
diff --git a/delegate/opaque/src/ElementwiseBinary.hpp b/delegate/opaque/src/ElementwiseBinary.hpp
index 8448609..2a67802 100644
--- a/delegate/opaque/src/ElementwiseBinary.hpp
+++ b/delegate/opaque/src/ElementwiseBinary.hpp
@@ -244,10 +244,14 @@
 
 std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
         DelegateData& delegateData,
-        const armnn::TensorInfo& outputTensorInfo)
+        const armnn::TensorInfo& outputTensorInfo,
+        int nodeIndex)
 {
+    auto layerName = GetName(armnn::BinaryOperation::Div, nodeIndex);
     armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-            armnn::BinaryOperation::Div);
+            armnn::BinaryOperation::Div,
+            layerName.c_str());
+
     // if the output of the div is Signed32 the Floor layer is not required
     if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
     {
@@ -255,7 +259,8 @@
     }
     armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
-    armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
+    auto floorName = GetName(armnn::LayerType::Floor, nodeIndex);
+    armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer(floorName.c_str());
     outputSlot.Connect(floorLayer->GetInputSlot(0));
     return std::make_pair(divisionLayer, floorLayer);
 }
@@ -411,46 +416,55 @@
 
     armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
     armnnDelegate::MultiLayerFacade multiLayer;
+    std::string layerName;
     switch(elementwiseBinaryOperatorCode)
     {
         case kTfLiteBuiltinAdd:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::Add);
+            layerName = GetName(armnn::BinaryOperation::Add, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add,
+                                                                                       layerName.c_str());
             break;
         case kTfLiteBuiltinDiv:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::Div);
+            layerName = GetName(armnn::BinaryOperation::Div, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Div,
+                                                                                       layerName.c_str());
             break;
         case kTfLiteBuiltinFloorDiv:
         {
-            auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
+            auto layers = AddFloorDivLayer(delegateData, outputTensorInfo, nodeIndex);
             multiLayer.AssignValues(layers.first, layers.second);
             elementwiseBinaryLayer = &multiLayer;
         }
             break;
         case kTfLiteBuiltinMaximum:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::Maximum);
+            layerName = GetName(armnn::BinaryOperation::Maximum, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Maximum,
+                                                                                       layerName.c_str());
             break;
         case kTfLiteBuiltinMinimum:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::Minimum);
+            layerName = GetName(armnn::BinaryOperation::Minimum, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Minimum,
+                                                                                       layerName.c_str());
             break;
         case kTfLiteBuiltinMul:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::Mul);
+            layerName = GetName(armnn::BinaryOperation::Mul, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul,
+                                                                                       layerName.c_str());
             break;
         case kTfLiteBuiltinPow:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::Power);
+            layerName = GetName(armnn::BinaryOperation::Power, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Power,
+                                                                                       layerName.c_str());
             break;
         case kTfLiteBuiltinSquaredDifference:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::SqDiff);
+            layerName = GetName(armnn::BinaryOperation::SqDiff, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::SqDiff,
+                                                                                       layerName.c_str());
             break;
         case kTfLiteBuiltinSub:
-            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
-                    armnn::BinaryOperation::Sub);
+            layerName = GetName(armnn::BinaryOperation::Sub, nodeIndex);
+            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Sub,
+                                                                                       layerName.c_str());
             break;
         default:
             return kTfLiteError;
@@ -462,7 +476,8 @@
     auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
                                               delegateData,
                                               tfLiteContext,
-                                              tfLiteNode);
+                                              tfLiteNode,
+                                              nodeIndex);
     if (inputsTensorsProcess == kTfLiteError)
     {
         return inputsTensorsProcess;
@@ -479,7 +494,8 @@
         return kTfLiteOk;
     }
     // Check and Create Activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData,
+                           nodeIndex);
 }
 
 } // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/ElementwiseUnary.hpp b/delegate/opaque/src/ElementwiseUnary.hpp
index df84846..24b851f 100644
--- a/delegate/opaque/src/ElementwiseUnary.hpp
+++ b/delegate/opaque/src/ElementwiseUnary.hpp
@@ -119,7 +119,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+    auto layerName = GetName(descriptor.m_Operation, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -127,7 +128,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Fill.hpp b/delegate/opaque/src/Fill.hpp
index a8cdf3a..fe27255 100644
--- a/delegate/opaque/src/Fill.hpp
+++ b/delegate/opaque/src/Fill.hpp
@@ -112,7 +112,8 @@
             return isSupported ? kTfLiteOk : kTfLiteError;
         }
 
-        armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+        auto layerName = GetName(armnn::LayerType::Fill, nodeIndex);
+        armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor, layerName.c_str());
         layer->SetBackendId(setBackend);
         ARMNN_ASSERT(layer != nullptr);
 
@@ -122,7 +123,8 @@
         auto inputsTensorsProcess = ProcessInputs(layer,
                                                   delegateData,
                                                   tfLiteContext,
-                                                  tfLiteNode);
+                                                  tfLiteNode,
+                                                  nodeIndex);
         if (inputsTensorsProcess == kTfLiteError)
         {
             return inputsTensorsProcess;
diff --git a/delegate/opaque/src/FullyConnected.hpp b/delegate/opaque/src/FullyConnected.hpp
index 3282cab..7be0668 100644
--- a/delegate/opaque/src/FullyConnected.hpp
+++ b/delegate/opaque/src/FullyConnected.hpp
@@ -186,7 +186,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::FullyConnected, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -270,7 +271,8 @@
                                 layer,
                                 reshapedOutputTensorInfo,
                                 outputTensorInfo,
-                                delegateData);
+                                delegateData,
+                                nodeIndex);
         if (!layer)
         {
             TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
@@ -289,7 +291,7 @@
     }
 
     // Check and Create Activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
 }
 
 } // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Gather.hpp b/delegate/opaque/src/Gather.hpp
index b27016e..73bb8a0 100644
--- a/delegate/opaque/src/Gather.hpp
+++ b/delegate/opaque/src/Gather.hpp
@@ -109,7 +109,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+    auto layerName = GetName(armnn::LayerType::Gather, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -117,7 +118,8 @@
     auto inputsTensorsProcess = ProcessInputs(layer,
                                               delegateData,
                                               tfLiteContext,
-                                              tfLiteNode);
+                                              tfLiteNode,
+                                              nodeIndex);
     if (inputsTensorsProcess == kTfLiteError)
     {
         return inputsTensorsProcess;
diff --git a/delegate/opaque/src/GatherNd.hpp b/delegate/opaque/src/GatherNd.hpp
index a767d01..cab68da 100644
--- a/delegate/opaque/src/GatherNd.hpp
+++ b/delegate/opaque/src/GatherNd.hpp
@@ -82,7 +82,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+    auto layerName = GetName(armnn::LayerType::GatherNd, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer(layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -90,7 +91,8 @@
     auto inputsTensorsProcess = ProcessInputs(layer,
                                               delegateData,
                                               tfLiteContext,
-                                              tfLiteNode);
+                                              tfLiteNode,
+                                              nodeIndex);
     if (inputsTensorsProcess == kTfLiteError)
     {
         return inputsTensorsProcess;
diff --git a/delegate/opaque/src/LogicalBinary.hpp b/delegate/opaque/src/LogicalBinary.hpp
index 44a443b..3bac72b 100644
--- a/delegate/opaque/src/LogicalBinary.hpp
+++ b/delegate/opaque/src/LogicalBinary.hpp
@@ -119,7 +119,9 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+    auto layerName = GetName(desc.m_Operation, nodeIndex);
+    armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc,
+                                                                                                 layerName.c_str());
     logicalBinaryLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(logicalBinaryLayer != nullptr);
 
@@ -129,7 +131,8 @@
     auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
                                               delegateData,
                                               tfLiteContext,
-                                              tfLiteNode);
+                                              tfLiteNode,
+                                              nodeIndex);
     if (inputsTensorsProcess == kTfLiteError)
     {
         return inputsTensorsProcess;
diff --git a/delegate/opaque/src/Lstm.hpp b/delegate/opaque/src/Lstm.hpp
index b896b46..439e401 100644
--- a/delegate/opaque/src/Lstm.hpp
+++ b/delegate/opaque/src/Lstm.hpp
@@ -266,7 +266,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+    auto layerName = GetName(armnn::LayerType::Lstm, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
diff --git a/delegate/opaque/src/Normalization.hpp b/delegate/opaque/src/Normalization.hpp
index c6ac676..181e6e2 100644
--- a/delegate/opaque/src/Normalization.hpp
+++ b/delegate/opaque/src/Normalization.hpp
@@ -82,7 +82,8 @@
     }
 
     // Add a L2Normalization layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::L2Normalization, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -90,7 +91,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
@@ -183,7 +184,8 @@
     }
 
     // Add a Normalization layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::Normalization, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -191,7 +193,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/OpaqueDelegateUtils.hpp b/delegate/opaque/src/OpaqueDelegateUtils.hpp
index 1c90ee0..7c9f0c8 100644
--- a/delegate/opaque/src/OpaqueDelegateUtils.hpp
+++ b/delegate/opaque/src/OpaqueDelegateUtils.hpp
@@ -10,6 +10,7 @@
 
 #include <armnn/ArmNN.hpp>
 #include <armnn/BackendHelper.hpp>
+#include <armnn/TypesUtils.hpp>
 #include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
@@ -23,8 +24,44 @@
 #include <tensorflow/lite/minimal_logging.h>
 #include <tensorflow/lite/kernels/kernel_util.h>
 
+#include <fmt/format.h>
+
 namespace
 {
+std::string GetName(armnn::ActivationFunction function, int nodeIndex)
+{
+    return fmt::format("{}:{}", GetActivationFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetName(armnn::ArgMinMaxFunction function, int nodeIndex)
+{
+    return fmt::format("{}:{}", GetArgMinMaxFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetName(armnn::BinaryOperation opType, int nodeIndex)
+{
+    return fmt::format("{}:{}", GetBinaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetName(armnn::ComparisonOperation layerType, int nodeIndex)
+{
+    return fmt::format("{}:{}", GetComparisonOperationAsCString(layerType), nodeIndex);
+}
+
+std::string GetName(armnn::LogicalBinaryOperation operation, int nodeIndex)
+{
+    return fmt::format("{}:{}", GetLogicalBinaryOperationAsCString(operation), nodeIndex);
+}
+
+std::string GetName(armnn::UnaryOperation opType, int nodeIndex)
+{
+    return fmt::format("{}:{}", GetUnaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetName(armnn::LayerType layerType, int nodeIndex, std::string subname = "")
+{
+    return fmt::format("{}{}:{}", GetLayerTypeAsCString(layerType), subname, nodeIndex);
+}
 
 // Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
 #define FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
@@ -225,7 +262,8 @@
                              TfLiteFusedActivation activationType,
                              armnn::IConnectableLayer* prevLayer,
                              unsigned int outputSlotIndex,
-                             armnnOpaqueDelegate::DelegateData& data)
+                             armnnOpaqueDelegate::DelegateData& data,
+                             int nodeIndex)
 {
     const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
 
@@ -288,7 +326,8 @@
     {
         return kTfLiteError;
     }
-    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+    auto layerName = GetName(activationDesc.m_Function, nodeIndex);
+    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc, layerName.c_str());
     activationLayer->SetBackendId(setBackend);
 
     ARMNN_ASSERT(activationLayer != nullptr);
@@ -322,7 +361,8 @@
                                           armnn::IConnectableLayer* prevLayer,
                                           armnn::TensorInfo reshapedOutputTensorInfo,
                                           armnn::TensorInfo outputTensorInfo,
-                                          armnnOpaqueDelegate::DelegateData& data)
+                                          armnnOpaqueDelegate::DelegateData& data,
+                                          int nodeIndex)
 {
     armnn::ReshapeDescriptor desc;
     desc.m_TargetShape = outputTensorInfo.GetShape();
@@ -344,7 +384,8 @@
         return nullptr;
     }
 
-    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
+    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex);
+    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc, layerName.c_str());
     reshapeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(reshapeLayer != nullptr);
 
@@ -570,7 +611,8 @@
 TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                            armnnOpaqueDelegate::DelegateData& delegateData,
                            TfLiteOpaqueContext* tfLiteContext,
-                           TfLiteOpaqueNode* tfLiteNode)
+                           TfLiteOpaqueNode* tfLiteNode,
+                           int nodeIndex)
 {
     // Get array of input indices, inputIndexArray is set from the TfLiteOpaqueNodeInputs function
     // This function turns inputIndexArray into an int array of indices. These indices point to the index of the
@@ -610,7 +652,9 @@
 
             auto constantInput = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
 
-            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+            auto layerName = GetName(armnn::LayerType::Constant, nodeIndex);
+            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput,
+                                                                                               layerName.c_str());
             constantLayer->SetBackendId(setBackend);
             armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
             outputSlot.SetTensorInfo(inputTensorInfo);
diff --git a/delegate/opaque/src/Pack.hpp b/delegate/opaque/src/Pack.hpp
index c3ea7da..5a05232 100644
--- a/delegate/opaque/src/Pack.hpp
+++ b/delegate/opaque/src/Pack.hpp
@@ -121,7 +121,8 @@
     }
 
     // The TfLite Pack operator is equivalent to the ArmNN Stack operator
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+    auto layerName = GetName(armnn::LayerType::Stack, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -129,7 +130,8 @@
     auto inputsTensorsProcess = ProcessInputs(layer,
                                               delegateData,
                                               tfLiteContext,
-                                              tfLiteNode);
+                                              tfLiteNode,
+                                              nodeIndex);
     if (inputsTensorsProcess == kTfLiteError)
     {
         return inputsTensorsProcess;
diff --git a/delegate/opaque/src/Pad.hpp b/delegate/opaque/src/Pad.hpp
index 112e7bb..4305224 100644
--- a/delegate/opaque/src/Pad.hpp
+++ b/delegate/opaque/src/Pad.hpp
@@ -182,7 +182,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::Pad, nodeIndex);
+    armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor, layerName.c_str());
     padLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(padLayer != nullptr);
 
diff --git a/delegate/opaque/src/Pooling.hpp b/delegate/opaque/src/Pooling.hpp
index 45a10f3..8e6500c 100644
--- a/delegate/opaque/src/Pooling.hpp
+++ b/delegate/opaque/src/Pooling.hpp
@@ -131,7 +131,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::Pooling2d, nodeIndex);
+    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor, layerName.c_str());
     poolingLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(poolingLayer != nullptr);
 
@@ -139,18 +140,18 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
 
-    if(Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+    if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
     {
         return kTfLiteError;
     }
 
     // Check and create activation
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
 }
 
 TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
@@ -344,7 +345,8 @@
     }
 
     // Create the Layer
-    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::Pooling3d, nodeIndex);
+    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor, layerName.c_str());
     poolingLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(poolingLayer != nullptr);
 
@@ -353,17 +355,17 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
 
-    if(Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+    if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
     {
         return kTfLiteError;
     }
 
-    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
 }
 
 } // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Prelu.hpp b/delegate/opaque/src/Prelu.hpp
index 1a4037e..1c9f06d 100644
--- a/delegate/opaque/src/Prelu.hpp
+++ b/delegate/opaque/src/Prelu.hpp
@@ -98,7 +98,8 @@
                                      outputTensorInfo);
     }
 
-    armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer();
+    auto layerName = GetName(armnn::LayerType::Prelu, nodeIndex);
+    armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer(layerName.c_str());
     ARMNN_ASSERT(preluLayer != nullptr);
 
     bool isConstantAlpha = IsConstantTensor(tfLiteAlphaTensor);
@@ -108,7 +109,9 @@
     {
         auto constAlphaTensor = armnn::ConstTensor(alphaTensorInfo, TfLiteOpaqueTensorData(tfLiteAlphaTensor));
 
-        armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor);
+        auto alphaName = GetName(armnn::LayerType::Constant, nodeIndex, "Alpha");
+        armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor,
+                                                                                        alphaName.c_str());
         ARMNN_ASSERT(constLayer != nullptr);
 
         constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
diff --git a/delegate/opaque/src/Quantization.hpp b/delegate/opaque/src/Quantization.hpp
index 7a1dd6f..d7f5c5c 100644
--- a/delegate/opaque/src/Quantization.hpp
+++ b/delegate/opaque/src/Quantization.hpp
@@ -79,7 +79,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+    auto layerName = GetName(armnn::LayerType::Dequantize, nodeIndex);
+    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer(layerName.c_str());
     dequantizeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(dequantizeLayer != nullptr);
 
@@ -89,7 +90,8 @@
     auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
                                               delegateData,
                                               tfLiteContext,
-                                              tfLiteNode);
+                                              tfLiteNode,
+                                              nodeIndex);
     if (inputsTensorsProcess == kTfLiteError)
     {
         return inputsTensorsProcess;
@@ -176,7 +178,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+    auto layerName = GetName(armnn::LayerType::Quantize, nodeIndex);
+    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer(layerName.c_str());
     quantizeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(quantizeLayer != nullptr);
 
@@ -184,7 +187,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(quantizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Redefine.hpp b/delegate/opaque/src/Redefine.hpp
index ce90af0..5ce7a3d 100644
--- a/delegate/opaque/src/Redefine.hpp
+++ b/delegate/opaque/src/Redefine.hpp
@@ -73,7 +73,8 @@
     }
 
     // Add a Cast layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+    auto layerName = GetName(armnn::LayerType::Cast, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer(layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -81,7 +82,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
@@ -242,7 +243,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -250,7 +252,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
@@ -350,7 +352,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex, "Squeeze");
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -358,7 +361,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
@@ -478,7 +481,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex, "ExpandDims");
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -487,7 +491,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Reduce.hpp b/delegate/opaque/src/Reduce.hpp
index afea7aa..a7948ae 100644
--- a/delegate/opaque/src/Reduce.hpp
+++ b/delegate/opaque/src/Reduce.hpp
@@ -147,7 +147,8 @@
     }
 
     // Add an Reduce layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+    auto layerName = GetName(armnn::LayerType::Reduce, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -155,7 +156,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Resize.hpp b/delegate/opaque/src/Resize.hpp
index 509ae62..948b600 100644
--- a/delegate/opaque/src/Resize.hpp
+++ b/delegate/opaque/src/Resize.hpp
@@ -203,13 +203,16 @@
 
 
     armnn::IConnectableLayer* resizeLayer = nullptr;
+    layerName += ":";
+    layerName += std::to_string(nodeIndex);
+
     resizeLayer = delegateData.m_Network->AddResizeLayer(desc, layerName.c_str());
 
     armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(resizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(resizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/ReverseV2.hpp b/delegate/opaque/src/ReverseV2.hpp
index e5714f4..5291aac 100644
--- a/delegate/opaque/src/ReverseV2.hpp
+++ b/delegate/opaque/src/ReverseV2.hpp
@@ -127,8 +127,6 @@
         }
     }
 
-    std::string layerName("ReverseV2");
-
     // Get axis tensor data
     auto axisTensorNumValues = static_cast<unsigned int>(TfLiteOpaqueTensorDim(tfLiteAxisTensor,0));
 
@@ -155,13 +153,14 @@
                                          outputTensorInfo);
     }
 
+    auto layerName = GetName(armnn::LayerType::ReverseV2, nodeIndex);
     armnn::IConnectableLayer* reverseV2Layer = delegateData.m_Network->AddReverseV2Layer(layerName.c_str());
 
     armnn::IOutputSlot& outputSlot = reverseV2Layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(reverseV2Layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(reverseV2Layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Round.hpp b/delegate/opaque/src/Round.hpp
index c64c210..4064b63 100644
--- a/delegate/opaque/src/Round.hpp
+++ b/delegate/opaque/src/Round.hpp
@@ -72,14 +72,15 @@
     }
 
     // Add a Floor layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer();
+    auto layerName = GetName(armnn::LayerType::Floor, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer(layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Shape.hpp b/delegate/opaque/src/Shape.hpp
index 4c37c38..9f15a4f 100644
--- a/delegate/opaque/src/Shape.hpp
+++ b/delegate/opaque/src/Shape.hpp
@@ -59,7 +59,7 @@
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
 
     auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
-    if ( shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64 )
+    if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
     {
         TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                 tfLiteContext,
@@ -92,7 +92,8 @@
     }
 
     // Add a Shape layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+    auto layerName = GetName(armnn::LayerType::Shape, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer(layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -100,7 +101,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Slice.hpp b/delegate/opaque/src/Slice.hpp
index e39e4af..7876b7b 100644
--- a/delegate/opaque/src/Slice.hpp
+++ b/delegate/opaque/src/Slice.hpp
@@ -6,7 +6,6 @@
 #pragma once
 
 #include <OpaqueDelegateUtils.hpp>
-#include <fmt/format.h>
 
 namespace armnnOpaqueDelegate
 {
@@ -169,9 +168,9 @@
         validateFunc(outputTensorInfo, isSupported);
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
-    auto layerName = fmt::format("Slice:{}", nodeIndex);
 
     // Add a Slice layer
+    auto layerName = GetName(armnn::LayerType::Slice, nodeIndex);
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
@@ -180,7 +179,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Softmax.hpp b/delegate/opaque/src/Softmax.hpp
index 8792761..31fe1c9 100644
--- a/delegate/opaque/src/Softmax.hpp
+++ b/delegate/opaque/src/Softmax.hpp
@@ -125,6 +125,8 @@
     }
 
     armnn::IConnectableLayer* softmaxLayer = nullptr;
+    auto layerName = GetName(armnn::LayerType::Softmax, nodeIndex);
+
     switch(tfliteSoftmaxOperatorCode)
     {
         case kTfLiteBuiltinSoftmax:
@@ -132,13 +134,13 @@
             armnn::SoftmaxDescriptor descriptor;
             auto* nodeParameters = reinterpret_cast<TfLiteSoftmaxParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
             descriptor.m_Beta = nodeParameters->beta;
-            softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+            softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor, layerName.c_str());
             break;
         }
         case kTfLiteBuiltinLogSoftmax:
         {
             armnn::LogSoftmaxDescriptor descriptor;
-            softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+            softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor, layerName.c_str());
             break;
         }
         default:
@@ -150,7 +152,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(softmaxLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/SpaceDepth.hpp b/delegate/opaque/src/SpaceDepth.hpp
index 9cc61eb..a1c5544 100644
--- a/delegate/opaque/src/SpaceDepth.hpp
+++ b/delegate/opaque/src/SpaceDepth.hpp
@@ -83,12 +83,13 @@
     }
 
     // Add a SpaceToDepth layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::SpaceToDepth, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
@@ -173,7 +174,8 @@
     }
 
     // Add a DepthToSpace layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::DepthToSpace, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -181,7 +183,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Split.hpp b/delegate/opaque/src/Split.hpp
index d3d00e4..aec0fb6 100644
--- a/delegate/opaque/src/Split.hpp
+++ b/delegate/opaque/src/Split.hpp
@@ -157,7 +157,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+    auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -391,7 +392,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+    auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -401,7 +403,7 @@
     }
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/StridedSlice.hpp b/delegate/opaque/src/StridedSlice.hpp
index 9ac3342..2e17e32 100644
--- a/delegate/opaque/src/StridedSlice.hpp
+++ b/delegate/opaque/src/StridedSlice.hpp
@@ -153,7 +153,8 @@
     }
 
     // Add a StridedSlice layer
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::StridedSlice, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor, layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
@@ -161,7 +162,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Tile.hpp b/delegate/opaque/src/Tile.hpp
index 17cbdee..0ad65ca 100644
--- a/delegate/opaque/src/Tile.hpp
+++ b/delegate/opaque/src/Tile.hpp
@@ -167,7 +167,7 @@
                                     tileDescriptor);
     }
 
-    std::string layerName("Tile");
+    auto layerName = GetName(armnn::LayerType::Tile, nodeIndex);
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
 
     if (layer == nullptr)
@@ -177,7 +177,7 @@
 
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
-    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/Transpose.hpp b/delegate/opaque/src/Transpose.hpp
index 2627c42..5af03b3 100644
--- a/delegate/opaque/src/Transpose.hpp
+++ b/delegate/opaque/src/Transpose.hpp
@@ -94,7 +94,8 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+    auto layerName = GetName(armnn::LayerType::Transpose, nodeIndex);
+    armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor, layerName.c_str());
     transposeLayer->SetBackendId(setBackend);
     ARMNN_ASSERT(transposeLayer != nullptr);
     // Permutation vector given to descriptor object
@@ -104,7 +105,7 @@
     outputSlot.SetTensorInfo(outputTensorInfo);
 
     // try to connect the Constant Inputs if there are any
-    if(ProcessInputs(transposeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    if (ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/opaque/src/UnidirectionalSequenceLstm.hpp b/delegate/opaque/src/UnidirectionalSequenceLstm.hpp
index 790f287..2fd64c0 100644
--- a/delegate/opaque/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/opaque/src/UnidirectionalSequenceLstm.hpp
@@ -320,7 +320,10 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
-    armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+    auto layerName = GetName(armnn::LayerType::UnidirectionalSequenceLstm, nodeIndex);
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc,
+                                                                                                 params,
+                                                                                                 layerName.c_str());
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
diff --git a/delegate/opaque/src/Unpack.hpp b/delegate/opaque/src/Unpack.hpp
index 9b87bf7..0956d16 100644
--- a/delegate/opaque/src/Unpack.hpp
+++ b/delegate/opaque/src/Unpack.hpp
@@ -187,10 +187,9 @@
         return isSupported ? kTfLiteOk : kTfLiteError;
     };
 
-    std::string splitterLayerName("Unpack Splitter");
-
+    auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex, "Unpack");
     armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
-                                                                                       splitterLayerName.c_str());
+                                                                                       layerName.c_str());
     splitterLayer->SetBackendId(setBackendSplit);
     ARMNN_ASSERT(splitterLayer != nullptr);
 
@@ -206,7 +205,7 @@
     // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
     for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
     {
-        std::string reshapeLayerName("Unpack Reshape");
+        auto reshapeLayerName = GetName(armnn::LayerType::Reshape, nodeIndex, "Unpack");
         armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                                          reshapeLayerName.c_str());
         reshapeLayer->SetBackendId(setBackendReshape);