IVGCVSW-7204 Add TransposeConv2d support to TOSA Reference Backend

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I9bfd597afd41468f304edfbe5d7141378ce60d4f
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index e12813a..47d6c28 100644
--- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -147,3 +147,77 @@
                                                 { { 0, qExpectedOutputData } },
                                                 backends);
 }
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
+void SimpleTransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
+                                          armnn::DataLayout dataLayout)
+{
+    using namespace armnn;
+    using T = ResolveType<ArmnnType>;
+
+    const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
+
+    TensorInfo inputInfo({1, 2, 2, 1}, ArmnnType, qScale, qOffset, true);
+    TensorInfo outputInfo({1, 3, 3, 1}, ArmnnType, qScale, qOffset);
+    TensorInfo weightsInfo({1, 2, 2, 1}, ArmnnType, qScale, qOffset, true);
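+    // Bias scale follows the usual quantization convention of input scale * weight scale.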
+    TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
+
+    std::vector<float> inputData =
+    {
+        1, 2, 3, 4
+    };
+
+    std::vector<float> weightsData =
+    {
+        0, 1, 2, 4
+    };
+    std::vector<float> biasesData = { 0.f };
+
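+    // Transposed convolution scatter-accumulates input * kernel into the output; e.g. with
+    // stride 1 and no padding, output[1][1] = 1*4 + 2*2 + 3*1 + 4*0 = 11, giving the 3x3
+    // result below.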
+    std::vector<float> expectedOutputData =
+    {
+        0, 1,  2,
+        2, 11, 12,
+        6, 20, 16
+    };
+
+    TransposeConvolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 0;
+    descriptor.m_PadRight    = 0;
+    descriptor.m_PadTop      = 0;
+    descriptor.m_PadBottom   = 0;
+    descriptor.m_StrideX     = 1;
+    descriptor.m_StrideY     = 1;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout  = dataLayout;
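+    // The explicit output shape set below is forwarded by the operator mapping to the
+    // TOSA transpose_conv2d output shape attribute.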
+    descriptor.m_OutputShapeEnabled = true;
+    descriptor.m_OutputShape = { 1, 3, 3, 1 };
+
+    // Quantize the data; with Float32 the scale of 1.0f and offset of 0 make this a pass-through.
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+
+    using BT = ResolveType<ArmnnBType>;
+    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
+
+    ConstTensor weights(weightsInfo, qWeightsData);
+    ConstTensor biases(biasesInfo, qBiasesData);
+
+    INetworkPtr network = CreateTransposeConvolution2dNetwork(descriptor,
+                                                              inputInfo,
+                                                              outputInfo,
+                                                              weights,
+                                                              Optional<ConstTensor>(biases));
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                { { 0, qInputData } },
+                                                { { 0, qExpectedOutputData } },
+                                                backends);
+}
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 15629ff..7ecf726 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -67,6 +67,11 @@
             auto sliceDesc = PolymorphicDowncast<const SliceDescriptor*>(&descriptor);
             return ConvertSliceToTosaOperator(layer, inputs, outputs, sliceDesc);
         }
+        case LayerType::TransposeConvolution2d:
+        {
+            auto transposeConv2dDesc = PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor);
+            return ConvertTransposeConv2dToTosaOperator(layer, inputs, outputs, transposeConv2dDesc);
+        }
         default:
         {
             return CreateEmptyTosaSerializationBasicBlock();
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index cb1d68e..90c1a4f 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -19,6 +19,8 @@
         SliceOperator.hpp
         SliceOperator.cpp
         TosaOperatorUtils.hpp
+        TransposeConv2dOperator.hpp
+        TransposeConv2dOperator.cpp
     )
 
 add_library(armnnTosaBackendOperators OBJECT ${armnnTosaBackendOperators_sources})
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index a3597f0..1a9d6be 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -11,4 +11,5 @@
 #include "AvgPool2DIgnoreValueOperator.hpp"
 #include "Pooling2DOperator.hpp"
 #include "ReshapeOperator.hpp"
-#include "SliceOperator.hpp"
\ No newline at end of file
+#include "SliceOperator.hpp"
+#include "TransposeConv2dOperator.hpp"
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index 288966b..be2f53e 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -75,6 +75,23 @@
     }
 }
 
+// Function that generates a unique output name using the layer type, slot index and layer guid.
+inline std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot)
+{
+    Layer& connectedLayer = layer.GetOutputSlot().GetConnection(0)->GetOwningLayer();
+
+    // Get the layer connected to the output slot; if it is an Output layer, use that layer
+    // and its id, otherwise use the current layer and id.
+    if(connectedLayer.GetType() == LayerType::Output)
+    {
+        return GenerateUniqueName(connectedLayer, layerSlot);
+    }
+    else
+    {
+        return GenerateUniqueName(layer, layerSlot);
+    }
+}
+
 // Function to return unique int as a string to ensure uniqueness between all input, output and block names.
 static int uniqueTosaMappingID = 0;
 inline std::string GetUniqueTosaMappingID()
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
new file mode 100644
index 0000000..a0d58e2
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
@@ -0,0 +1,166 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TransposeConv2dOperator.hpp"
+
+#include "layers/TransposeConvolution2dLayer.hpp"
+
+TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator(const Layer* layer,
+                                                                  const std::vector<const TensorInfo*>& inputs,
+                                                                  const std::vector<const TensorInfo*>& outputs,
+                                                                  const TransposeConvolution2dDescriptor* descriptor)
+{
+    std::string input0Name = std::string("input0_");
+    std::string input1Name = std::string("constant_") + GetUniqueTosaMappingID();
+    std::string input2Name = std::string("constant_") + GetUniqueTosaMappingID();
+    std::string outputName = std::string("output0_");
+    std::string blockName  = std::string("Op_TRANSPOSE_CONV2D_block_") + GetUniqueTosaMappingID();
+
+    // If a layer is present, the block will be used for execution, so the input and output names must be
+    // derived from the adjacent layers to keep the graph connected. For validation only, this doesn't matter.
+    if(layer != nullptr)
+    {
+        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+        input0Name = GenerateUniqueName(connectedInputLayer, 0);
+
+        outputName = GenerateUniqueOutputName(*layer, 0);
+    }
+
+    std::vector<TosaSerializationTensor*> tensors;
+    std::vector<TosaSerializationOperator*> operators;
+
+    // Setup input tensor.
+    // Only add the tensor if the connected layer is an input layer, as intermediate
+    // and constant tensors are created separately.
+    // Tensors must also not be duplicated within the block.
+    if(input0Name.find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
+    }
+
+    // Setup weights tensor, constant data will get copied during SetConstantTensorData
+    operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {input1Name}));
+
+    // During validation the TensorInfo can be retrieved from the inputs.
+    // During execution, it is only available through the layer so use m_Weight.
+    if(layer == nullptr)
+    {
+        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
+    }
+    else
+    {
+        auto transposeConv2dLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(layer);
+
+        std::vector<int32_t> inputShape1 = GetTosaTensorShape(
+                transposeConv2dLayer->m_Weight->GetTensorInfo().GetShape());
+        DType inputDType1 = ArmNNToDType(transposeConv2dLayer->m_Weight->GetTensorInfo().GetDataType());
+
+        std::vector<uint8_t> uint8Data = ConvertConstantTensorDataToBuffer(transposeConv2dLayer->m_Weight);
+        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, uint8Data));
+    }
+
+    // Setup bias operator and tensor, constant data will get copied during SetConstantTensorData
+    operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {input2Name}));
+
+    // During validation the TensorInfo can be retrieved from the inputs.
+    // During execution, it is only available through the layer so use m_Bias.
+    if(layer == nullptr && descriptor->m_BiasEnabled)
+    {
+        std::vector<int32_t> inputShape2 = GetTosaTensorShape(inputs[2]->GetShape());
+        DType inputDType2 = ArmNNToDType(inputs[2]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(input2Name, inputShape2, inputDType2, {}));
+    }
+    else if(descriptor->m_BiasEnabled)
+    {
+        auto transposeConv2dLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(layer);
+
+        std::vector<int32_t> inputShape2 = GetTosaTensorShape(
+                transposeConv2dLayer->m_Bias->GetTensorInfo().GetShape());
+        DType inputDType2 = ArmNNToDType(transposeConv2dLayer->m_Bias->GetTensorInfo().GetDataType());
+
+        std::vector<uint8_t> uint8Data = ConvertConstantTensorDataToBuffer(transposeConv2dLayer->m_Bias);
+        tensors.push_back(new TosaSerializationTensor(input2Name, inputShape2, inputDType2, uint8Data));
+    }
+    else
+    {
+        // If bias is disabled, create a constant bias tensor of 0's, as three inputs are required.
+        // The bias length must match the channels dimension, so pick the correct shape index
+        // for the given data layout.
+        unsigned int index = (descriptor->m_DataLayout == DataLayout::NHWC) ? 3 : 1;
+
+        std::vector<uint8_t> uint8Data;
+        std::vector<float> data(outputs[0]->GetShape()[index], 0.0f);
+
+        TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
+
+        tensors.push_back(new TosaSerializationTensor(input2Name,
+                                                      {static_cast<int32_t>(outputs[0]->GetShape()[index])},
+                                                      DType_FP32,
+                                                      uint8Data));
+    }
+
+    // Setup Output Tensor
+    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // Set up TRANSPOSE_CONV2D operator
+    // The TOSA Reference Model applies padding to the output shape (padding is added),
+    // whereas Arm NN applies it to the input shape (padding is taken away).
+    // Negative padding values are therefore used to offset the difference.
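+    // e.g. using the standard transposed-convolution size relation
+    // out = (in - 1) * stride + out_pad_top + out_pad_bottom + kernel, the end-to-end test's
+    // 2x2 input, 2x2 kernel, stride 1 and zero padding give (2 - 1) * 1 + 0 + 0 + 2 = 3, i.e. a 3x3 output.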
+    std::vector<int> pad = {-static_cast<int>(descriptor->m_PadTop),
+                            -static_cast<int>(descriptor->m_PadBottom),
+                            -static_cast<int>(descriptor->m_PadLeft),
+                            -static_cast<int>(descriptor->m_PadRight)};
+    std::vector<int> stride = {static_cast<int>(descriptor->m_StrideY),
+                               static_cast<int>(descriptor->m_StrideX)};
+
+    std::vector<int> outputShape;
+    // If available, use the shape from the descriptor; otherwise use the output tensor shape.
+    if (descriptor->m_OutputShape.size() == 4)
+    {
+        for (uint32_t i = 0; i < descriptor->m_OutputShape.size(); ++i)
+        {
+            outputShape.push_back(static_cast<int>(descriptor->m_OutputShape[i]));
+        }
+    }
+    else
+    {
+        for (uint32_t i = 0; i < outputs[0]->GetNumDimensions(); ++i)
+        {
+            outputShape.push_back(static_cast<int>(outputs[0]->GetShape()[i]));
+        }
+    }
+
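+    // The two zero arguments are the input and weight zero points; the final argument
+    // passes the input data type.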
+    TosaTransposeConvAttribute attribute(pad, stride, outputShape, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));
+
+    auto* op = new TosaSerializationOperator(Op_TRANSPOSE_CONV2D,
+                                             Attribute_TransposeConvAttribute,
+                                             &attribute,
+                                             {input0Name, input1Name, input2Name},
+                                             {outputName});
+    operators.push_back(op);
+
+    // operatorInputNames/operatorOutputNames end up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
+    return new TosaSerializationBasicBlock(blockName,                            // name
+                                           operators,                            // operators
+                                           tensors,                              // tensors
+                                           {input0Name, input1Name, input2Name}, // inputs
+                                           {outputName});                        // outputs
+}
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.hpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.hpp
new file mode 100644
index 0000000..eb911a1
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator(const Layer* layer,
+                                                                  const std::vector<const TensorInfo*>& inputs,
+                                                                  const std::vector<const TensorInfo*>& outputs,
+                                                                  const TransposeConvolution2dDescriptor* descriptor);
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index 0d19a32..2b0c1e5 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -438,6 +438,112 @@
 }
 
 
+TEST_CASE("GetTosaMapping_TransposeConv2dLayer")
+{
+    const TensorInfo inputInfo ({ 1, 7, 7, 1 }, DataType::Float32);
+    const TensorInfo outputInfo({ 1, 9, 9, 1 }, DataType::Float32);
+    const TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+    const TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
+
+    TransposeConvolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 1;
+    descriptor.m_PadRight    = 1;
+    descriptor.m_PadTop      = 1;
+    descriptor.m_PadBottom   = 1;
+    descriptor.m_StrideX     = 1;
+    descriptor.m_StrideY     = 1;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout  = DataLayout::NHWC;
+
+    TosaSerializationBasicBlock* basicBlock = GetTosaMapping(nullptr,
+                                                             LayerType::TransposeConvolution2d,
+                                                             {&inputInfo, &weightsInfo, &biasesInfo},
+                                                             {&outputInfo},
+                                                             descriptor);
+
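+    // Expect two CONST operators (weights, bias) plus TRANSPOSE_CONV2D, and four tensors
+    // (input, weights, bias, output).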
+    CHECK(basicBlock->GetInputs().size() == 3);
+    CHECK(basicBlock->GetOutputs().size() == 1);
+    CHECK(basicBlock->GetOperators().size() == 3);
+    CHECK(basicBlock->GetTensors().size() == 4);
+
+    CHECK(basicBlock->GetInputs()[0].find("input0_") != std::string::npos);
+    CHECK(basicBlock->GetInputs()[1].find("constant_") != std::string::npos);
+    CHECK(basicBlock->GetInputs()[2].find("constant_") != std::string::npos);
+    CHECK(basicBlock->GetOutputs()[0].find("output0_") != std::string::npos);
+
+    VerifyTosaAttribute(descriptor,
+                        basicBlock->GetOperators().at(2)->GetAttribute(),
+                        {},
+                        {},
+                        LayerType::TransposeConvolution2d);
+}
+
+TEST_CASE("GetTosaMappingFromLayer_TransposeConv2dLayer")
+{
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    const TensorInfo inputInfo ({ 1, 7, 7, 1 }, DataType::Float32);
+    const TensorInfo outputInfo({ 1, 9, 9, 1 }, DataType::Float32);
+    const TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+    const TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
+
+    std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
+    ConstTensor weights(weightsInfo, weightsData);
+
+    std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
+    ConstTensor biases(biasesInfo, biasesData);
+
+    TransposeConvolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 1;
+    descriptor.m_PadRight    = 1;
+    descriptor.m_PadTop      = 1;
+    descriptor.m_PadBottom   = 1;
+    descriptor.m_StrideX     = 1;
+    descriptor.m_StrideY     = 1;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout  = DataLayout::NHWC;
+
+    IConnectableLayer* const inputLayer  = net->AddInputLayer(0);
+    IConnectableLayer* const convLayer   =
+            net->AddTransposeConvolution2dLayer(descriptor,
+                                                weights,
+                                                Optional<ConstTensor>(biases),
+                                                "transposeConvolution2d");
+    IConnectableLayer* const outputLayer = net->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
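+    // When mapping from a real layer, the weights and biases are read from the layer's
+    // m_Weight/m_Bias members rather than from separate constant layers.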
+    TosaSerializationBasicBlock* basicBlock = GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(convLayer));
+
+    CHECK(basicBlock->GetInputs().size() == 3);
+    CHECK(basicBlock->GetOutputs().size() == 1);
+    CHECK(basicBlock->GetOperators().size() == 3);
+    CHECK(basicBlock->GetTensors().size() == 4);
+
+    CHECK(basicBlock->GetInputs()[0].find("input0_") != std::string::npos);
+    CHECK(basicBlock->GetInputs()[1].find("constant_") != std::string::npos);
+    CHECK(basicBlock->GetInputs()[2].find("constant_") != std::string::npos);
+    CHECK(basicBlock->GetOutputs()[0].find("output0_") != std::string::npos);
+
+    VerifyTosaAttribute(descriptor,
+                        basicBlock->GetOperators().at(2)->GetAttribute(),
+                        {},
+                        {},
+                        LayerType::TransposeConvolution2d);
+}
+
 TEST_CASE("GetTosaMapping_Unimplemented")
 {
     TosaSerializationBasicBlock* basicBlock =
diff --git a/src/backends/tosaCommon/test/TosaTestUtils.hpp b/src/backends/tosaCommon/test/TosaTestUtils.hpp
index 93b9e7d..140cb83 100644
--- a/src/backends/tosaCommon/test/TosaTestUtils.hpp
+++ b/src/backends/tosaCommon/test/TosaTestUtils.hpp
@@ -144,6 +144,22 @@
 
             break;
         }
+        case LayerType::TransposeConvolution2d:
+        {
+            auto transposeConv2dDesc = PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor);
+            std::vector<int> outPad = {-static_cast<int>(transposeConv2dDesc->m_PadTop),
+                                       -static_cast<int>(transposeConv2dDesc->m_PadBottom),
+                                       -static_cast<int>(transposeConv2dDesc->m_PadLeft),
+                                       -static_cast<int>(transposeConv2dDesc->m_PadRight)};
+            std::vector<int> stride = {static_cast<int>(transposeConv2dDesc->m_StrideY),
+                                       static_cast<int>(transposeConv2dDesc->m_StrideX)};
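+            // The attribute is expected to hold the negated padding and the (Y, X) stride
+            // ordering written by ConvertTransposeConv2dToTosaOperator.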
+            TosaTransposeConvAttribute transposeConvAttribute(attribute);
+            CHECK(outPad == transposeConvAttribute.out_pad());
+            CHECK(stride == transposeConvAttribute.stride());
+            break;
+        }
         default:
             break;
     }
@@ -167,12 +183,7 @@
     // The number of tensors in the block can be different if there are constant layers, as they are created separately.
     if(type == LayerType::Convolution2d)
     {
-        numInputTensors = 2;
-        auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
-        if(conv2dDesc->m_BiasEnabled)
-        {
-            numInputTensors = 3;
-        }
+        numInputTensors = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor)->m_BiasEnabled ? 3 : 2;
     }
 
     std::string blockStr = operatorString + "_block_";
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index 928a19c..e5427eb 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -66,6 +66,19 @@
             inputInfos.push_back(&infos[0]);
             outputInfos.push_back(&infos[1]);
             break;
+        case LayerType::TransposeConvolution2d:
+        {
+            inputInfos.push_back(&infos[0]); // input
+            outputInfos.push_back(&infos[1]); // output
+            inputInfos.push_back(&infos[2]); // weights
+
+            auto conv2dDesc = PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor);
+            if(conv2dDesc->m_BiasEnabled)
+            {
+                inputInfos.push_back(&infos[3]); // bias
+            }
+            break;
+        }
         default:
             break;
     }
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index 2f12310..00c0386 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -10,6 +10,7 @@
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 #include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
 #include "backendsCommon/test/SliceEndToEndTestImpl.hpp"
+#include "backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp"
 
 #include <doctest/doctest.h>
 
@@ -108,4 +109,17 @@
     SliceEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
 }
 
+// TransposeConvolution2d
+TEST_CASE("TosaRefTransposeConvolution2dEndToEndFloatNhwcTest")
+{
+    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
+        tosaDefaultBackends, armnn::DataLayout::NHWC);
+}
+
+TEST_CASE("TosaRefSimpleTransposeConvolution2dEndToEndFloatNhwcTest")
+{
+    SimpleTransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
+        tosaDefaultBackends, armnn::DataLayout::NHWC);
+}
+
 }
\ No newline at end of file
diff --git a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
index 0d0cd6e..3c3abc2 100644
--- a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
@@ -105,7 +105,7 @@
 
     TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Convolution2d,
                                                      {inputInfo, outputInfo, weightsInfo, biasesInfo},
                                                      desc,
                                                      EmptyOptional(),
@@ -128,7 +128,7 @@
 
     TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Convolution2d,
                                                      {inputInfo, outputInfo, weightsInfo, biasesInfo},
                                                      desc,
                                                      EmptyOptional(),
@@ -150,7 +150,7 @@
     desc.m_PoolWidth = 1;
     desc.m_StrideX = 1;
     desc.m_StrideY = 1;
-    desc.m_PoolType = armnn::PoolingAlgorithm::Max;
+    desc.m_PoolType = PoolingAlgorithm::Max;
 
     TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
@@ -324,4 +324,49 @@
     CHECK(!supported);
 }
 
+TEST_CASE("IsLayerSupportedTosaReferenceTransposeConv2d")
+{
+    TensorInfo inputInfo ({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 5, 5, 1 }, DataType::Float32);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo biasesInfo ({ 1 }, DataType::Float32);
+
+    TransposeConvolution2dDescriptor desc;
+    desc.m_StrideX = 1;
+    desc.m_StrideY = 1;
+    desc.m_BiasEnabled = true;
+
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::TransposeConvolution2d,
+                                                     {inputInfo, outputInfo, weightsInfo, biasesInfo},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceTransposeConv2dUnsupported")
+{
+    // If inputs and weights are Fp32, the output must also be Fp32, so a Fp16 output is unsupported.
+    TensorInfo inputInfo ({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 5, 5, 1 }, DataType::Float16);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+    TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
+
+    TransposeConvolution2dDescriptor desc;
+    desc.m_BiasEnabled = true;
+
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::TransposeConvolution2d,
+                                                     {inputInfo, outputInfo, weightsInfo, biasesInfo},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+    CHECK(!supported);
+}
+
 }