BugFix: calculate explicit padding for Transpose Convolution using output size

* If the output shape is given for Transpose Convolution, use it to calculate the explicit padding, as in the example below.
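  For example (illustrative values): with SAME padding, an input size of 4, a filter size of 3,
  a stride of 2 and a requested output size of 8 give a total padding of (4 - 1) * 2 + 3 - 8 = 1,
  split as padFront = 0 and padBack = 1.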


Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I0bf3dee94c2ce606ed67fb385018b220188c3017
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index f79c588..534d6b4 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -98,20 +98,25 @@
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
 
     std::vector<TensorShape> expectedOutputShape;
+    std::vector<TensorShape> outputShapeGivenAsInput;
+
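+    // Infer the output shape from the input and weight shapes. It is used as the expected
+    // output shape and, when an explicit output shape is given, to validate that shape below.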
+    expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+                                             m_Weight->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(expectedOutputShape.size() == 1);
+
     // If output_shape was specified then use it rather than calculate an inferred output shape.
     if (m_Param.m_OutputShapeEnabled)
     {
         TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
             m_Param.m_OutputShape.data());
-        expectedOutputShape.push_back(shapeAsTensorShape);
-    }
-    else
-    {
-        expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
-                                                 m_Weight->GetTensorInfo().GetShape() });
-    }
+        outputShapeGivenAsInput.push_back(shapeAsTensorShape);
 
-    ARMNN_ASSERT(expectedOutputShape.size() == 1);
+        ARMNN_ASSERT(outputShapeGivenAsInput.size() == 1);
+        ARMNN_ASSERT_MSG(expectedOutputShape == outputShapeGivenAsInput,
+                         "TransposeConvolution2dLayer: output calculated by InferOutputShapes and "
+                         "the output given as an input parameter to the layer are not matching");
+    }
 
     ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
 }
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 2a7f049..244f1fa 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -411,6 +411,29 @@
     }
 }
 
+// Function that calculates explicit padding when the output shape is known.
+// At the moment the output shape is only given as an input parameter in Transpose Convolution,
+// not in Convolution or Depthwise Convolution.
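+// For VALID padding both values stay 0. For SAME padding, the total padding required to reach the
+// requested output size is split between front and back, with any odd remainder going to the back.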
+void CalcPadding(uint32_t inputSize,
+                 uint32_t filterSize,
+                 uint32_t stride,
+                 uint32_t dilation,
+                 uint32_t& paddingFront,
+                 uint32_t& paddingBack,
+                 tflite::Padding padding,
+                 uint32_t outputSize)
+{
+    IgnoreUnused(dilation);
+    paddingFront = 0;
+    paddingBack = 0;
+    if (padding == tflite::Padding_SAME)
+    {
+        uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
+        paddingFront = totalPadding / 2;
+        paddingBack = totalPadding - paddingFront;
+    }
+}
+
 armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                                const std::vector<unsigned int>& shape,
                                const bool outputTensor = false)
@@ -1608,6 +1631,17 @@
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
+    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 2);
+    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
+
+    // TfLite uses NHWC tensors
+    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
+
+    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];
+
     // This block determines the output shape of the transpose convolution. If the output shape tensor ptr is not null
     // And the tensor is a constant, we can access the data at load time and set the output shape of the
     // layer. If this is not constant, We do not have access to the shape data, so we have to use
@@ -1634,32 +1668,47 @@
             desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
         }
         desc.m_OutputShapeEnabled = true;
+
+        // TfLite uses NHWC tensors
+        const unsigned int outputHeight = desc.m_OutputShape[1];
+        const unsigned int outputWidth  = desc.m_OutputShape[2];
+
+        CalcPadding(inputHeight,
+                    filterHeight,
+                    desc.m_StrideY,
+                    1, // DilationY
+                    desc.m_PadTop,
+                    desc.m_PadBottom,
+                    options->padding,
+                    outputHeight);
+
+        CalcPadding(inputWidth,
+                    filterWidth,
+                    desc.m_StrideX,
+                    1, // DilationX
+                    desc.m_PadLeft,
+                    desc.m_PadRight,
+                    options->padding,
+                    outputWidth);
     }
-    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 2);
-    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
+    else
+    {
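+        // The output shape is not known at parse time, so fall back to computing the padding
+        // from the input size, filter size and stride alone.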
+        CalcPadding(inputHeight,
+                    filterHeight,
+                    desc.m_StrideY,
+                    1, // DilationY
+                    desc.m_PadTop,
+                    desc.m_PadBottom,
+                    options->padding);
 
-    // TfLite uses NHWC tensors
-    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
-    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
-
-    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
-    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];
-
-    CalcPadding(inputHeight,
-                filterHeight,
-                desc.m_StrideY,
-                1, // DilationY
-                desc.m_PadTop,
-                desc.m_PadBottom,
-                options->padding);
-
-    CalcPadding(inputWidth,
-                filterWidth,
-                desc.m_StrideX,
-                1, // DilationX
-                desc.m_PadLeft,
-                desc.m_PadRight,
-                options->padding);
+        CalcPadding(inputWidth,
+                    filterWidth,
+                    desc.m_StrideX,
+                    1, // DilationX
+                    desc.m_PadLeft,
+                    desc.m_PadRight,
+                    options->padding);
+    }
 
     auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());