IVGCVSW-7168 Support simple model in the TOSA Reference Backend

 * Fixed issue where duplicate tensors were being created.
 * Fixed issue where output name could be generated with the wrong id.
 * Updated bias tensor for Conv2d, so the size matches the channel.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I1de6947e036b3e629ec6446d24d69e50603a5593
diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
index f1fb34c..20ba146 100644
--- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
@@ -26,8 +26,7 @@
         input1Name = GenerateUniqueName(connectedLayer1, 1);
 
         // Get the layer connected to the output slot and determine unique layer name.
-        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
-        outputName = GenerateUniqueName(connectedOutputLayer, 0);
+        outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
     auto* op = new TosaSerializationOperator(Op_ADD,
@@ -36,24 +35,38 @@
                                              {input0Name, input1Name},
                                              {outputName});
 
-    std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
-    DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
 
-    std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
-    DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+    std::vector<TosaSerializationTensor*> tensors;
+
+    // Only add input tensors if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(input0Name.find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
+    }
+
+    if(input1Name.find("input1_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
+    }
 
     std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
     DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
 
-    auto* inputTensor0  = new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {});
-    auto* inputTensor1  = new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {});
-    auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {op}, // operators
-                                           {inputTensor0, inputTensor1, outputTensor0}, // tensors
+                                           tensors, // tensors
                                            {input0Name, input1Name}, // inputs
                                            {outputName}); // outputs
 }
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
index 7e7631d..d268c2f 100644
--- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
@@ -24,8 +24,7 @@
         padInputName = GenerateUniqueName(connectedInputLayer, 0);
 
         // Get the layer connected to the output slot and determine unique layer name.
-        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
-        poolOutputName = GenerateUniqueName(connectedOutputLayer, 0);
+        poolOutputName = GenerateUniqueOutputName(*layer, 0);
     }
 
     std::vector<int> paddings;
@@ -74,9 +73,19 @@
                                                  {padOutputName},
                                                  {poolOutputName});
 
+    std::vector<TosaSerializationTensor*> tensors;
+
     std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
     DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
 
+    // Only add input tensors if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(padInputName.find("input0_") != std::string::npos)
+    {
+        tensors.push_back(new TosaSerializationTensor(padInputName, inputShape, inputDType, {}));
+    }
+
     std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
     DType outputDType = ArmNNToDType(outputs[0]->GetDataType());
 
@@ -96,15 +105,14 @@
                              inputShape[3] + paddings[6] + paddings[7]};
     }
 
-    auto* inputTensor        = new TosaSerializationTensor(padInputName, inputShape, inputDType, {});
-    auto* intermediateTensor = new TosaSerializationTensor(padOutputName, intermediateShape, inputDType, {});
-    auto* outputTensor       = new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {});
+    tensors.push_back(new TosaSerializationTensor(padOutputName, intermediateShape, inputDType, {}));
+    tensors.push_back(new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {}));
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {opPad, opPool}, // operators
-                                           {inputTensor, intermediateTensor, outputTensor}, // tensors
+                                           tensors, // tensors
                                            {padInputName}, // inputs
                                            {poolOutputName}); // outputs
 }
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
index 9c095d6..dadd91b 100644
--- a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
@@ -39,19 +39,23 @@
         }
 
         // Get the layer connected to the output slot and determine unique layer name.
-        Layer& connectedLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
-
-        outputName = GenerateUniqueName(connectedLayer, 0);
+        outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
     std::vector<TosaSerializationTensor*> tensors;
     std::vector<TosaSerializationOperator*> operators;
 
     // Setup input Tensor
-    std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
-    DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+    // Only add tensor if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(inputNames[0].find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
 
-    tensors.push_back(new TosaSerializationTensor(inputNames[0], inputShape0, inputDType0, {}));
+        tensors.push_back(new TosaSerializationTensor(inputNames[0], inputShape0, inputDType0, {}));
+    }
 
     // Only add input tensors if weights and bias are not constant or if running validation.
     // Constant tensors will be created in the ConvertConstantToTosaOperator function.
@@ -80,12 +84,18 @@
 
         operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {constantName}));
 
+        // The size of the bias must match the channels dimension, so get the correct index.
+        unsigned int index = (conv2dDescriptor->m_DataLayout == DataLayout::NHWC) ? 3 : 1;
+
         std::vector<uint8_t> uint8Data;
-        std::vector<float> data = { 0.0 };
+        std::vector<float> data(outputs[0]->GetShape()[index], 0.0f);
 
         TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
 
-        tensors.push_back(new TosaSerializationTensor(constantName, {1}, DType_FP32, uint8Data));
+        tensors.push_back(new TosaSerializationTensor(constantName,
+                                                      {static_cast<int32_t>(outputs[0]->GetShape()[index])},
+                                                      DType_FP32,
+                                                      uint8Data));
         inputNames.emplace_back(constantName);
     }
 
diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
index 265901e..ee02425 100644
--- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
@@ -26,8 +26,7 @@
         input0Name = GenerateUniqueName(connectedInputLayer, 0);
 
         // Get the layer connected to the output slot and determine unique layer name.
-        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
-        outputName = GenerateUniqueName(connectedOutputLayer, 0);
+        outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
     std::vector<int> pad = {static_cast<int>(poolDescriptor->m_PadTop),
@@ -46,20 +45,29 @@
                                              {input0Name},
                                              {outputName});
 
-    std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
-    DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+    std::vector<TosaSerializationTensor*> tensors;
+
+    // Only add input tensors if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(input0Name.find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
+    }
 
     std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
     DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
 
-    auto* inputTensor0  = new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {});
-    auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {op}, // operators
-                                           {inputTensor0, outputTensor0}, // tensors
+                                           tensors, // tensors
                                            {input0Name}, // inputs
                                            {outputName}); // outputs
 }
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
index b88a6ef..3027e2e 100644
--- a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
@@ -23,8 +23,7 @@
         inputName = GenerateUniqueName(connectedLayer, 0);
 
         // Get the layer connected to the output slot and determine unique layer name.
-        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
-        outputName = GenerateUniqueName(connectedOutputLayer, 0);
+        outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
     TosaReshapeAttribute attribute(GetTosaTensorShape(reshapeDescriptor->m_TargetShape));
@@ -35,20 +34,29 @@
                                              {inputName},
                                              {outputName});
 
-    std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
-    DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
+    std::vector<TosaSerializationTensor*> tensors;
+
+    // Only add input tensors if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(inputName.find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(inputName, inputShape, inputDType, {}));
+    }
 
     std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
     DType outputDType = ArmNNToDType(outputs[0]->GetDataType());
 
-    auto* inputTensor  = new TosaSerializationTensor(inputName, inputShape, inputDType, {});
-    auto* outputTensor = new TosaSerializationTensor(outputName, outputShape, outputDType, {});
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape, outputDType, {}));
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {op}, // operators
-                                           {inputTensor, outputTensor}, // tensors
+                                           tensors, // tensors
                                            {inputName}, // inputs
                                            {outputName}); // outputs
 }
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
index fc2e40a..742ba88 100644
--- a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
@@ -23,8 +23,7 @@
         inputName = GenerateUniqueName(connectedLayer, 0);
 
         // Get the layer connected to the output slot and determine unique layer name.
-        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
-        outputName = GenerateUniqueName(connectedOutputLayer, 0);
+        outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
     std::vector<int32_t> begin(sliceDescriptor->m_Begin.begin(), sliceDescriptor->m_Begin.end());
@@ -38,20 +37,29 @@
                                              {inputName},
                                              {outputName});
 
-    std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
-    DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
+    std::vector<TosaSerializationTensor*> tensors;
+
+    // Only add input tensors if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(inputName.find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(inputName, inputShape, inputDType, {}));
+    }
 
     std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
     DType outputDType = ArmNNToDType(outputs[0]->GetDataType());
 
-    auto* inputTensor  = new TosaSerializationTensor(inputName, inputShape, inputDType, {});
-    auto* outputTensor = new TosaSerializationTensor(outputName, outputShape, outputDType, {});
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape, outputDType, {}));
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {op}, // operators
-                                           {inputTensor, outputTensor}, // tensors
+                                           tensors, // tensors
                                            {inputName}, // inputs
                                            {outputName}); // outputs
 }
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
index a0d58e2..1ad8c95 100644
--- a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
@@ -94,8 +94,7 @@
     {
         // If bias is disabled, create a constant bias tensor of 0's as three inputs are required.
         // The size of the bias must match the channels dimension, so get the correct index.
-        unsigned int index = (descriptor->m_DataLayout == DataLayout::NHWC) ?
-                outputs[0]->GetShape()[3] : outputs[0]->GetShape()[1];
+        unsigned int index = (descriptor->m_DataLayout == DataLayout::NHWC) ? 3 : 1;
 
         std::vector<uint8_t> uint8Data;
         std::vector<float> data(outputs[0]->GetShape()[index], 0.0f);