IVGCVSW-8206 In TosaCommon, modify how unique names for inputs are generated.

* input_<GUID>
* constant_<GUID>
* intermediate<output slot>_<GUID>
* output<output slot>_<GUID>

Input and constant tensors do not need the output slot index: input layers
and constant layers only have one output slot, so the GUID alone is enough
to make their names unique.
This was already the case for constants, but for inputs we were also
appending the input slot index, which is not needed.
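
As a rough illustration of the resulting scheme (a standalone sketch, not
the actual helpers; the real code is GenerateUniqueName /
GenerateUniqueInputName in TosaOperatorUtils.hpp and operates on
armnn::Layer and armnn::InputSlot):

    #include <cstdint>
    #include <string>

    // Hypothetical stand-in for the ArmNN layer kinds involved.
    enum class LayerKind { Input, Output, Constant, Other };

    // Inputs and constants have a single output slot, so the GUID alone is
    // unique. Outputs and intermediates keep the output slot index to
    // disambiguate multi-output layers such as Split.
    std::string MakeTosaTensorName(LayerKind kind, uint32_t outputSlot, uint64_t guid)
    {
        const std::string guidStr     = std::to_string(guid);
        const std::string slotAndGuid = std::to_string(outputSlot) + "_" + guidStr;
        switch (kind)
        {
            case LayerKind::Input:    return "input_" + guidStr;
            case LayerKind::Constant: return "constant_" + guidStr;
            case LayerKind::Output:   return "output" + slotAndGuid;
            default:                  return "intermediate" + slotAndGuid;
        }
    }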

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I07393fc60f3135337b59a9780aa3a263a995fc9c
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 1ebb68b..0e44d54 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -127,7 +127,7 @@
     }
 }
 
-TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer)
+TosaSerializationBasicBlock* GetTosaMappingFromLayer(const Layer* layer)
 {
     std::vector<const TensorInfo*> inputs;
     for (auto inputSlot : layer->GetInputSlots())
diff --git a/src/backends/tosaCommon/TosaMappings.hpp b/src/backends/tosaCommon/TosaMappings.hpp
index cc41f1b..fe1ba3a 100644
--- a/src/backends/tosaCommon/TosaMappings.hpp
+++ b/src/backends/tosaCommon/TosaMappings.hpp
@@ -27,4 +27,4 @@
 
 // Function called in armnn::OptimizeSubgraphView() when access to armnn::Layer is available
 // and there is an option to set TOSA basic block data from constant layer tensors available from the input layer.
-TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer);
+TosaSerializationBasicBlock* GetTosaMappingFromLayer(const Layer* layer);
diff --git a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp b/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
index 480cdf5..c13555d 100644
--- a/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ActivationOperator.cpp
@@ -29,7 +29,7 @@
         throw armnn::Exception("ConvertActivationToTosaOperator: 1 output tensor required.");
     }
 
-    std::string inputName       = std::string("input0_");
+    std::string inputName       = std::string("input_");
     std::string outputNameAlpha = std::string("intermediate1_") + GetUniqueTosaMappingID();
     std::string outputNameMul   = std::string("intermediate2_") + GetUniqueTosaMappingID();
     std::string outputName      = std::string("output0_");
@@ -39,12 +39,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if (layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensors names.
-        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        inputName = GenerateUniqueName(connectedInputLayer, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     std::vector<TosaSerializationTensor*> tensors;
@@ -54,7 +50,7 @@
     // There also can't be duplicate tensor.
     std::vector<int32_t> inputShape0;
     DType inputDType0 =  DType::DType_UNKNOWN;
-    if(inputName.find("input0_") != std::string::npos)
+    if(inputName.find("input_") != std::string::npos)
     {
         inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
         inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
index a7ca873..bd198e2 100644
--- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -10,7 +10,7 @@
                                                                        const std::vector<const TensorInfo*>& outputs,
                                                                        const Pooling2dDescriptor* poolDescriptor)
 {
-    std::string padInputName   = std::string("input0_");
+    std::string padInputName   = std::string("input_");
     std::string padOutputName  = std::string("intermediate0_") + GetUniqueTosaMappingID();
     std::string poolOutputName = std::string("output0_");
     std::string blockName      = std::string("Op_AVG_POOL2D_block_") + GetUniqueTosaMappingID();
@@ -19,12 +19,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensors names.
-        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        padInputName = GenerateUniqueName(connectedInputLayer, 0);
-
-        // Determine unique output tensor name.
-        poolOutputName = GenerateUniqueOutputName(*layer, 0);
+        padInputName   = GenerateUniqueInputName(layer->GetInputSlot(0));
+        poolOutputName = GenerateUniqueOutputName(*layer);
     }
 
     std::vector<int> paddings;
@@ -81,7 +77,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(padInputName.find("input0_") != std::string::npos)
+    if(padInputName.find("input_") != std::string::npos)
     {
         tensors.push_back(new TosaSerializationTensor(padInputName, inputShape, inputDType, {}));
     }
diff --git a/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp b/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
index d1ff0df..905f32c 100644
--- a/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -21,7 +21,7 @@
     {
         for (uint32_t i = 0; i < numInputs; ++i)
         {
-            inputNames.push_back("input"+ std::to_string(i) +"_");
+            inputNames.push_back("input_"+ std::to_string(i));
         }
     }
     // If a layer is present then the block will be used for execution, so input and output names need to be determined
@@ -31,14 +31,12 @@
         // Get the layers connected to the input slots and determine unique tensor names.
         for (uint32_t i = 0; i < numInputs; ++i)
         {
-            Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
-
-            std::string inputName = GenerateUniqueName(connectedLayer, i);
+            std::string inputName = GenerateUniqueInputName(layer->GetInputSlot(i));
             inputNames.push_back(inputName);
         }
 
         // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     auto axis = static_cast<int32_t>(concatDescriptor->GetConcatAxis());
@@ -51,8 +49,7 @@
                                                                   {outputName});
 
     std::vector<TosaSerializationTensor*> tensors;
-    tensors.reserve(numInputs);
-
+    tensors.reserve(numInputs + 1);
     for (uint32_t i = 0; i < numInputs; ++i)
     {
         // Only add input tensors for validation or when the connected layer is an input layer.
diff --git a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
index 96701d4..6d1699d 100644
--- a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
@@ -22,11 +22,11 @@
     // Set input names for validation purposes only.
     if(layer == nullptr)
     {
-        inputNames.emplace_back("input0_");
-        inputNames.emplace_back("input1_");
+        inputNames.emplace_back("input_0");
+        inputNames.emplace_back("input_1");
         if(conv2dDescriptor->m_BiasEnabled)
         {
-            inputNames.emplace_back("input2_");
+            inputNames.emplace_back("input_2");
         }
     }
     // If a layer is present then the block will be used for execution, so input and output names need to be
@@ -37,14 +37,12 @@
         // Get the layer connected to the input slot and determine unique tensor names.
         for (uint32_t i = 0; i < inputs.size(); ++i)
         {
-            Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
-
-            std::string inputName = GenerateUniqueName(connectedLayer, i);
+            std::string inputName = GenerateUniqueInputName(layer->GetInputSlot(i));
             inputNames.push_back(inputName);
         }
 
         // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     std::vector<TosaSerializationTensor*> tensors;
@@ -54,7 +52,7 @@
     // Only add tensor if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensors.
-    if(inputNames[0].find("input0_") != std::string::npos)
+    if(inputNames[0].find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
 
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
index a9af249..55b4f15 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
@@ -11,8 +11,8 @@
                                                                     const std::vector<const TensorInfo*>& outputs,
                                                                     const ElementwiseBinaryDescriptor* descriptor)
 {
-    std::string input0Name = std::string("input0_");
-    std::string input1Name = std::string("input1_");
+    std::string input0Name = std::string("input_0");
+    std::string input1Name = std::string("input_1");
     std::string outputName = std::string("output0_");
     std::string blockName;
 
@@ -20,15 +20,9 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        input0Name = GenerateUniqueName(connectedLayer0, 0);
-
-        Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer();
-        input1Name = GenerateUniqueName(connectedLayer1, 1);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+        input1Name = GenerateUniqueInputName(layer->GetInputSlot(1));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     TosaSerializationOperator* op = nullptr;
@@ -93,13 +87,13 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(input0Name.find("input0_") != std::string::npos)
+    if(input0Name.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
         tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
     }
-    if(input1Name.find("input1_") != std::string::npos)
+    if(input1Name.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
         DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
index 02dddab..d0eac0b 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
@@ -10,7 +10,7 @@
                                                              const std::vector<const TensorInfo*>& outputs,
                                                              const ElementwiseUnaryDescriptor* unaryDescriptor)
 {
-    std::string input0Name = std::string("input0_");
+    std::string input0Name = std::string("input_");
     std::string outputName = std::string("output0_");
     std::string blockName  = std::string("Op_ELEMENTWISEUNARY_block_") + GetUniqueTosaMappingID();
 
@@ -19,12 +19,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layer connected to the input slot and determine unique the tensor name.
-        Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        input0Name = GenerateUniqueName(connectedLayer0, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     TosaSerializationOperator* op = nullptr;
@@ -48,7 +44,7 @@
     // Only add input tensor if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(input0Name.find("input0_") != std::string::npos)
+    if(input0Name.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
index c33f612..56e3f34 100644
--- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -13,7 +13,7 @@
     std::string poolType = (poolDescriptor->m_PoolType == PoolingAlgorithm::Max) ? "Op_MAX" : "Op_AVG";
     Op opcode = (poolDescriptor->m_PoolType == PoolingAlgorithm::Max) ? Op_MAX_POOL2D : Op_AVG_POOL2D;
 
-    std::string input0Name = std::string("input0_");
+    std::string input0Name = std::string("input_");
     std::string outputName = std::string("output0_");
     std::string blockName  = std::string("Op_") + poolType + std::string("_POOL2D_block_") + GetUniqueTosaMappingID();
 
@@ -21,12 +21,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        input0Name = GenerateUniqueName(connectedInputLayer, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     std::vector<int> pad = {static_cast<int>(poolDescriptor->m_PadTop),
@@ -50,7 +46,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(input0Name.find("input0_") != std::string::npos)
+    if(input0Name.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
index 1242d3b..a4d7d0e 100644
--- a/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/QuantizeOperator.cpp
@@ -21,7 +21,7 @@
     ARMNN_THROW_INVALIDARG_MSG_IF_FALSE( outputs.size() == 1,
                                          "ConvertQuantizeToTosaOperator: Quantize must have only one output" );
 
-    std::string inputName           = std::string("input0_");
+    std::string inputName           = std::string("input_");
     std::string outputName          = std::string("output0_");
     std::string blockName           = std::string("Op_QUANTIZE_block_") + GetUniqueTosaMappingID();
 
@@ -29,12 +29,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        inputName = GenerateUniqueName(connectedLayer, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     const TensorInfo inputInfo = *inputs[0];
@@ -60,7 +56,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(inputName.find("input0_") != std::string::npos)
+    if(inputName.find("input_") != std::string::npos)
     {
         tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
     }
diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
index 55d6680..e7e5dc7 100644
--- a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -10,7 +10,7 @@
                                                           const std::vector<const TensorInfo*>& outputs,
                                                           const ReshapeDescriptor* reshapeDescriptor)
 {
-    std::string inputName = std::string("input0_");
+    std::string inputName = std::string("input_");
     std::string outputName = std::string("output0_");
     std::string blockName  = std::string("Op_RESHAPE_block_") + GetUniqueTosaMappingID();
 
@@ -18,12 +18,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        inputName = GenerateUniqueName(connectedLayer, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     TosaReshapeAttribute attribute(GetTosaTensorShape(reshapeDescriptor->m_TargetShape));
@@ -39,7 +35,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(inputName.find("input0_") != std::string::npos)
+    if(inputName.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp
index 72c7352..bb1eabd 100644
--- a/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ResizeOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 // Copyright © 2020, 2023 The TensorFlow Authors. All Rights Reserved.
@@ -37,7 +37,7 @@
         throw armnn::InvalidArgumentException("ConvertResizeToTosaOperator: Unsupported Resize method.");
     }
 
-    std::string inputName = std::string("input0_");
+    std::string inputName = std::string("input_");
     std::string outputName = std::string("output0_");
     std::string blockName  = std::string("Op_RESIZE_block_") + GetUniqueTosaMappingID();
 
@@ -45,12 +45,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        inputName = GenerateUniqueName(connectedLayer, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     int32_t inputHeight = static_cast<int32_t>(inputs[0]->GetShape()[1]);
@@ -149,7 +145,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(inputName.find("input0_") != std::string::npos)
+    if(inputName.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
index 294d389..5fe0c8d 100644
--- a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -10,7 +10,7 @@
                                                         const std::vector<const TensorInfo*>& outputs,
                                                         const SliceDescriptor* sliceDescriptor)
 {
-    std::string inputName = std::string("input0_");
+    std::string inputName = std::string("input_");
     std::string outputName = std::string("output0_");
     std::string blockName  = std::string("Op_SLICE_block_") + GetUniqueTosaMappingID();
 
@@ -18,12 +18,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        inputName = GenerateUniqueName(connectedLayer, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     std::vector<int32_t> begin(sliceDescriptor->m_Begin.begin(), sliceDescriptor->m_Begin.end());
@@ -42,7 +38,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(inputName.find("input0_") != std::string::npos)
+    if(inputName.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp b/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp
index b733866..53f4f05 100644
--- a/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/SplitOperator.cpp
@@ -27,7 +27,7 @@
         throw armnn::Exception("ConvertSplitToTosaOperator: Dynamic input dimensions are unsupported.");
     }
 
-    std::string inputName = std::string("input0_");
+    std::string inputName = std::string("input_");
     std::vector<std::string> outputNames;
     std::string blockName  = std::string("Op_SPLIT_block_") + GetUniqueTosaMappingID();
 
@@ -36,9 +36,7 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        inputName = GenerateUniqueName(connectedLayer, 0);
+        inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
 
         for (unsigned int i=0; i < numSplit; ++i)
         {
@@ -87,7 +85,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(inputName.find("input0_") != std::string::npos)
+    if(inputName.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index b7f14bf..f566504 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -87,7 +87,7 @@
 }
 
 // Function that generates unique name using the layer type, input slot and layer guid.
-inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
+static std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
 {
     std::string guid        = std::to_string(layer.GetGuid());
     std::string slotAndGuid = std::to_string(layerSlot) + "_" + guid;
@@ -95,7 +95,7 @@
     switch (layer.GetType())
     {
         case LayerType::Input:
-            return "input" + slotAndGuid;
+            return "input_" + guid;
         case LayerType::Output:
             return "output" + slotAndGuid;
         case LayerType::Constant:
@@ -105,8 +105,19 @@
     }
 }
 
+// Function that generates unique name for the parent layer from the child layer input slot.
+inline std::string GenerateUniqueInputName(const armnn::InputSlot& slot)
+{
+    // Get the layers connected to the input slots and determine unique tensor names.
+    Layer& connectedLayer = slot.GetConnectedOutputSlot()->GetOwningLayer();
+    // For layer input, we want to ensure we get the correct output slot of the parent layer.
+    // For example, if parent layer is split, the parent output slot could be 0 or 1 index.
+    uint32_t connectedOutputSlotIdx = slot.GetConnectedOutputSlot()->CalculateIndexOnOwner();
+    return GenerateUniqueName(connectedLayer, connectedOutputSlotIdx);
+}
+
 // Function that generates unique output name using the layer type, input slot and layer guid.
-inline std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot)
+inline std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot = 0)
 {
     Layer& connectedLayer = layer.GetOutputSlot().GetConnection(0)->GetOwningLayer();
 
@@ -443,6 +454,12 @@
             error = TosaSerializationHandler::ConvertI8toU8(data, uint8Data);
             break;
         }
+        case DType::DType_UINT8:
+        {
+            const int8_t* copy_data = static_cast<const int8_t*>(value);
+            uint8Data.assign(copy_data, copy_data + numElements);
+            break;
+        }
         case DType::DType_INT4:
         {
             std::vector<int8_t> data(numElements, *static_cast<const int8_t*>(value));
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
index 8c2ae9f..81d58e0 100644
--- a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
@@ -12,7 +12,7 @@
                                                                   const std::vector<const TensorInfo*>& outputs,
                                                                   const TransposeConvolution2dDescriptor* descriptor)
 {
-    std::string input0Name = std::string("input0_");
+    std::string input0Name = std::string("input_");
     std::string input1Name = std::string("constant_") + GetUniqueTosaMappingID();
     std::string input2Name = std::string("constant_") + GetUniqueTosaMappingID();
     std::string outputName = std::string("output0_");
@@ -22,12 +22,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique tensor names.
-        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        input0Name = GenerateUniqueName(connectedInputLayer, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     std::vector<TosaSerializationTensor*> tensors;
@@ -37,7 +33,7 @@
     // Only add tensor if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensors.
-    if(input0Name.find("input0_") != std::string::npos)
+    if(input0Name.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp
index ccc7774..229a1b2 100644
--- a/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/TransposeOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -10,7 +10,7 @@
                                                             const std::vector<const TensorInfo*>& outputs,
                                                             const TransposeDescriptor* transposeDescriptor)
 {
-    std::string input0Name = std::string("input0_");
+    std::string input0Name = std::string("input_");
     std::string outputName = std::string("output0_");
     std::string blockName  = std::string("Op_TRANSPOSE_block_") + GetUniqueTosaMappingID();
 
@@ -18,12 +18,8 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slot and determine unique tensor name.
-        Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
-        input0Name = GenerateUniqueName(connectedLayer0, 0);
-
-        // Determine unique output tensor name.
-        outputName = GenerateUniqueOutputName(*layer, 0);
+        input0Name = GenerateUniqueInputName(layer->GetInputSlot(0));
+        outputName = GenerateUniqueOutputName(*layer);
     }
 
     std::vector<int32_t> mappings(transposeDescriptor->m_DimMappings.begin(),
@@ -42,7 +38,7 @@
     // Only add input tensors if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
     // There also can't be duplicate tensor.
-    if(input0Name.find("input0_") != std::string::npos)
+    if(input0Name.find("input_") != std::string::npos)
     {
         std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
         DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
diff --git a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
index 6f57c4a..4c38d6b 100644
--- a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
+++ b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022,2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -39,7 +39,7 @@
         std::basic_string<char> blockInputName = basicBlock->GetInputs()[i];
         std::basic_string<char> operatorInputName  = padOp->GetInputTensorNames()[i];
 
-        std::string opStr = "input" + std::to_string(i) + "_";
+        std::string opStr = "input_";
 
         CHECK(blockInputName == operatorInputName);
         CHECK(basicBlock->GetTensorByName(blockInputName));
diff --git a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
index 991ef15..6ad6ea8 100644
--- a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
@@ -77,8 +77,7 @@
     input0->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     pool->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
-    TosaSerializationBasicBlock* basicBlock =
-        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool));
+    TosaSerializationBasicBlock* basicBlock = GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool));
     VerifyAvgPool2DIgnoreValue(basicBlock,
                               inputShape,
                               outputShape,
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index 267c9fb..8665aa9 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -786,7 +786,7 @@
     CHECK(basicBlock->GetOperators().size() == 3);
     CHECK(basicBlock->GetTensors().size() == 4);
 
-    CHECK(basicBlock->GetInputs()[0].find("input0_") != std::string::npos);
+    CHECK(basicBlock->GetInputs()[0].find("input_") != std::string::npos);
     CHECK(basicBlock->GetInputs()[1].find("constant_") != std::string::npos);
     CHECK(basicBlock->GetInputs()[2].find("constant_") != std::string::npos);
     CHECK(basicBlock->GetOutputs()[0].find("output0_") != std::string::npos);
@@ -848,7 +848,7 @@
     CHECK(basicBlock->GetOperators().size() == 3);
     CHECK(basicBlock->GetTensors().size() == 4);
 
-    CHECK(basicBlock->GetInputs()[0].find("input0_") != std::string::npos);
+    CHECK(basicBlock->GetInputs()[0].find("input_") != std::string::npos);
     CHECK(basicBlock->GetInputs()[1].find("constant_") != std::string::npos);
     CHECK(basicBlock->GetInputs()[2].find("constant_") != std::string::npos);
     CHECK(basicBlock->GetOutputs()[0].find("output0_") != std::string::npos);
diff --git a/src/backends/tosaCommon/test/SplitChecker.hpp b/src/backends/tosaCommon/test/SplitChecker.hpp
index edef4a1..4a4eeba 100644
--- a/src/backends/tosaCommon/test/SplitChecker.hpp
+++ b/src/backends/tosaCommon/test/SplitChecker.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -38,7 +38,7 @@
         std::basic_string<char> blockInputName = splitBlock->GetInputs()[0];
         std::basic_string<char> operatorInputName = sliceOp->GetInputTensorNames()[0];
 
-        std::string opInputStr = "input" + std::to_string(0) + "_";
+        std::string opInputStr = "input_";
 
         CHECK(blockInputName == operatorInputName);
         CHECK(splitBlock->GetTensorByName(blockInputName));
diff --git a/src/backends/tosaCommon/test/TosaTestUtils.hpp b/src/backends/tosaCommon/test/TosaTestUtils.hpp
index 05dd164..a0eec74 100644
--- a/src/backends/tosaCommon/test/TosaTestUtils.hpp
+++ b/src/backends/tosaCommon/test/TosaTestUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -284,20 +284,20 @@
     CHECK(op->GetInputTensorNames().size() == numInputTensors);
     CHECK(op->GetOutputTensorNames().size() == numOutputs);
 
-    for (uint32_t i = 0; i < numInputs; i++)
+    for (uint32_t i = 0; i < numInputs; ++i)
     {
         std::basic_string<char> blockInputName = basicBlock->GetInputs()[i];
         std::basic_string<char> operatorInputName  = op->GetInputTensorNames()[i];
         std::basic_string<char> tensorName = basicBlock->GetTensors()[i]->GetName();
 
-        std::string opStr = "input" + std::to_string(i) + "_";
+        std::string opStr = "input_";
 
         CHECK(blockInputName == operatorInputName);
         CHECK(tensorName == operatorInputName);
         CHECK(blockInputName.find(opStr) != std::string::npos);
     }
 
-    for (uint32_t i = 0; i < numOutputs; i++)
+    for (uint32_t i = 0; i < numOutputs; ++i)
     {
         std::basic_string<char> blockOutputName = basicBlock->GetOutputs()[i];
         std::basic_string<char> operatorOutputName  = op->GetOutputTensorNames()[i];