IVGCVSW-7168 Add Conv2d and Constant support to TOSA Reference Backend

 * Added TOSA Conv2d and Constant mappings.
 * Added unique naming to mappings, derived from the previous and
   following layers, so the generated TOSA blocks are connected
   correctly (see the naming sketch below).
 * Updated existing mappings to use the new naming convention.
 * Added all mappings to one main block in OptimizeSubgraphView.
 * Removed isMain from mapping functions.
 * Added Conv2d EndToEnd test.
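
   Naming sketch (illustrative only: addLayer and the layer GUIDs 1, 2
   and 4 are assumed for the example; the accessors and
   GenerateUniqueName() are the ones added in this patch). For an
   Addition layer, the tensor names come from the layers connected to
   its slots rather than from the Addition layer itself:

       // addLayer is the armnn::Layer being mapped (hypothetical variable).
       Layer& in0 = addLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
       Layer& in1 = addLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer();
       Layer& out = addLayer->GetOutputSlot().GetConnection(0)->GetOwningLayer();

       std::string input0Name = GenerateUniqueName(in0, 0); // Input layer, guid 1  -> "input0_1"
       std::string input1Name = GenerateUniqueName(in1, 1); // Input layer, guid 2  -> "input1_2"
       std::string outputName = GenerateUniqueName(out, 0); // Output layer, guid 4 -> "output0_4"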

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I27c3e238407c32379ce25a1f01dad11523ef5d2b
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index c9668a2..d833caa 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -14,6 +14,7 @@
     ComparisonEndToEndTestImpl.hpp
     CompatibilityTests.cpp
     ConcatEndToEndTestImpl.hpp
+    Convolution2dEndToEndTestImpl.hpp
     Convolution3dEndToEndTestImpl.hpp
     CustomMemoryOptimizerStrategyTests.cpp
     DefaultAsyncExecuteTest.cpp
diff --git a/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp
new file mode 100644
index 0000000..bc9a942
--- /dev/null
+++ b/src/backends/backendsCommon/test/Convolution2dEndToEndTestImpl.hpp
@@ -0,0 +1,134 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "EndToEndTestImpl.hpp"
+#include <armnnUtils/QuantizeHelper.hpp>
+
+#include <ResolveType.hpp>
+
+#include <CommonTestUtils.hpp>
+#include <armnnTestUtils/DataLayoutUtils.hpp>
+
+#include <map>
+#include <vector>
+
+namespace
+{
+
+armnn::INetworkPtr CreateConstConvolution2dNetwork(const armnn::Convolution2dDescriptor& descriptor,
+                                                   const armnn::TensorInfo& inputInfo,
+                                                   const armnn::TensorInfo& weightsInfo,
+                                                   const armnn::TensorInfo& biasInfo,
+                                                   const armnn::TensorInfo& outputInfo,
+                                                   const armnn::ConstTensor& weights,
+                                                   const armnn::ConstTensor& biases,
+                                                   bool biasEnabled)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+    IConnectableLayer* input = network->AddInputLayer(0, "input");
+    IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "Weights");
+    IConnectableLayer* convolution2d = network->AddConvolution2dLayer(descriptor, "convolution2d");
+    IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+    Connect(input, convolution2d, inputInfo, 0, 0);
+    Connect(weightsLayer, convolution2d, weightsInfo, 0, 1);
+
+    if(biasEnabled)
+    {
+        armnn::IConnectableLayer* biasLayer = network->AddConstantLayer(biases, "Bias");
+        Connect(biasLayer, convolution2d, biasInfo, 0, 2);
+    }
+
+    Connect(convolution2d, output, outputInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void Convolution2dEndToEnd(const std::vector<armnn::BackendId>& backends,
+                           armnn::DataLayout dataLayout,
+                           bool biasEnabled = true)
+{
+    using namespace armnn;
+
+    const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
+
+    TensorInfo inputInfo({ 1, 5, 5, 1 }, ArmnnType, qScale, qOffset, true);
+    TensorInfo outputInfo({ 1, 3, 3, 1 }, ArmnnType, qScale, qOffset);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, ArmnnType, qScale, qOffset, true);
+    TensorInfo biasesInfo({ 1 }, ArmnnType, qScale * qScale, 0, true);
+
+    std::vector<float> inputData =
+    {
+        1.0f, 5.0f, 2.0f, 3.0f, 5.0f,
+        8.0f, 7.0f, 3.0f, 6.0f, 3.0f,
+        3.0f, 3.0f, 9.0f, 1.0f, 9.0f,
+        4.0f, 1.0f, 8.0f, 1.0f, 3.0f,
+        6.0f, 8.0f, 1.0f, 9.0f, 2.0f
+    };
+
+    std::vector<float> weightsData =
+    {
+        4.0f, 5.0f, 6.0f,
+        0.0f, 0.0f, 0.0f,
+        3.0f, 2.0f, 1.0f
+    };
+
+    std::vector<float> biasesData = { 1.0f };
+
+    float bias = biasEnabled ? biasesData[0] : 0.0f;
+    std::vector<float> expectedOutputData =
+    {
+        65.0f + bias,  76.0f + bias,  91.0f + bias,
+        107.0f + bias, 99.0f + bias,  89.0f + bias,
+        116.0f + bias, 98.0f + bias,  118.0f + bias,
+    };
+
+    Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 0;
+    descriptor.m_PadRight    = 0;
+    descriptor.m_PadTop      = 0;
+    descriptor.m_PadBottom   = 0;
+    descriptor.m_StrideX     = 1;
+    descriptor.m_StrideY     = 1;
+    descriptor.m_BiasEnabled = biasEnabled;
+    descriptor.m_DataLayout  = dataLayout;
+
+    if (dataLayout == DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw(inputInfo, inputData);
+        PermuteTensorNhwcToNchw(weightsInfo, weightsData);
+        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
+    }
+
+    // Quantize data
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
+    std::vector<T> qBiasesData         = armnnUtils::QuantizedVector<T>(biasesData, qScale * qScale, 0);
+
+    ConstTensor weights(weightsInfo, qWeightsData);
+    ConstTensor biases(biasesInfo, qBiasesData);
+
+    INetworkPtr network = CreateConstConvolution2dNetwork(descriptor,
+                                                          inputInfo,
+                                                          weightsInfo,
+                                                          biasesInfo,
+                                                          outputInfo,
+                                                          weights,
+                                                          biases,
+                                                          biasEnabled);
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
+                                                {{ 0, qInputData }},
+                                                {{ 0, qExpectedOutputData }},
+                                                backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 218f6dd..017330e 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -13,6 +13,7 @@
 #include <backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
+#include <backendsCommon/test/Convolution2dEndToEndTestImpl.hpp>
 #include <backendsCommon/test/Convolution3dEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
 #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
@@ -595,6 +596,21 @@
     ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
+TEST_CASE("RefConvolution2dFloat32Test")
+{
+    Convolution2dEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
+}
+
+TEST_CASE("RefConvolution2dNchwFloat32Test")
+{
+    Convolution2dEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
+}
+
+TEST_CASE("RefConvolution2dFloat16Test")
+{
+    Convolution2dEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
+}
+
 TEST_CASE("RefConvolution3dFloat32Test")
 {
     Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
diff --git a/src/backends/tosaCommon/TosaLayerSupportRules.hpp b/src/backends/tosaCommon/TosaLayerSupportRules.hpp
index 792908c..8855dd6 100644
--- a/src/backends/tosaCommon/TosaLayerSupportRules.hpp
+++ b/src/backends/tosaCommon/TosaLayerSupportRules.hpp
@@ -48,14 +48,34 @@
     }
 };
 
-struct TosaContainerContains : public Rule
+struct TosaContainerContainsTwoTypes : public Rule
 {
-    explicit TosaContainerContains(std::tuple<DType, DType>& check, const std::vector<std::tuple<DType, DType>>& c)
+    explicit TosaContainerContainsTwoTypes(std::tuple<DType, DType>& check,
+                                           const std::vector<std::tuple<DType, DType>>& c)
     {
         for (auto item: c)
         {
-            if (std::get<0>(check) == std::get<0>(item)
-                && std::get<1>(check) == std::get<1>(item))
+            if (std::get<0>(check) == std::get<0>(item) &&
+                std::get<1>(check) == std::get<1>(item))
+            {
+                m_Res = true;
+                return;
+            }
+        }
+        m_Res = false;
+    }
+};
+
+struct TosaContainerContainsThreeTypes : public Rule
+{
+    explicit TosaContainerContainsThreeTypes(std::tuple<DType, DType, DType>& check,
+                                             const std::vector<std::tuple<DType, DType, DType>>& c)
+    {
+        for (auto item: c)
+        {
+            if (std::get<0>(check) == std::get<0>(item) &&
+                std::get<1>(check) == std::get<1>(item) &&
+                std::get<2>(check) == std::get<2>(item))
             {
                 m_Res = true;
                 return;
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index a37eaf2..00ba429 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -8,40 +8,33 @@
 using namespace armnn;
 using namespace tosa;
 
-void SetBasicBlockConstantTensorData(Layer* layer, TosaSerializationBasicBlock* /*basicBlock*/)
-{
-    switch (layer->GetType())
-    {
-        case LayerType::Convolution2d:
-        {
-            // ToDo: using Convolution2d as an example as it has constant tensors for weights and bias.
-            // ToDo: manually set TosaOperator data of basicBlock where constant tensors exist.
-        }
-        default:
-            // If no switch statement for layer, no constant tensors exist in that layer, return
-            return;
-    }
-}
-
 TosaSerializationBasicBlock* CreateEmptyTosaSerializationBasicBlock()
 {
-    // empty basic block when no tosa mapping implemented/exists
-    TosaSerializationOperator* op =
-        new TosaSerializationOperator(Op_UNKNOWN, Attribute_NONE, nullptr, {}, {});
+    // Empty basic block when no TOSA mapping implemented/exists
+    auto* op = new TosaSerializationOperator(Op_UNKNOWN, Attribute_NONE, nullptr, {}, {});
     return new TosaSerializationBasicBlock("", {op}, {}, {}, {});
 }
 
-TosaSerializationBasicBlock* GetTosaMapping(const LayerType type,
+TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
+                                            const LayerType type,
                                             const std::vector<const TensorInfo*>& inputs,
                                             const std::vector<const TensorInfo*>& outputs,
-                                            const BaseDescriptor& descriptor,
-                                            bool isMain = false)
+                                            const BaseDescriptor& descriptor)
 {
     switch (type)
     {
         case LayerType::Addition:
         {
-            return ConvertAdditionToTosaOperator(inputs, outputs, isMain);
+            return ConvertAdditionToTosaOperator(layer, inputs, outputs);
+        }
+        case LayerType::Constant:
+        {
+            return ConvertConstantToTosaOperator(layer, outputs);
+        }
+        case LayerType::Convolution2d:
+        {
+            auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
+            return ConvertConv2dToTosaOperator(layer, inputs, outputs, conv2dDesc);
         }
         case LayerType::Pooling2d:
         {
@@ -57,11 +50,11 @@
             }
             else if (avgPoolIgnoreValue)
             {
-                return ConvertAvgPool2DIgnoreValueToTosaOperator(inputs, outputs, isMain, poolDesc);
+                return ConvertAvgPool2DIgnoreValueToTosaOperator(layer, inputs, outputs, poolDesc);
             }
             else
             {
-                return ConvertPooling2DToTosaOperator(inputs, outputs, isMain, poolDesc);
+                return ConvertPooling2DToTosaOperator(layer, inputs, outputs, poolDesc);
             }
         }
         default:
@@ -71,7 +64,7 @@
     }
 }
 
-TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer, bool isMain = false)
+TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer)
 {
     std::vector<const TensorInfo*> inputs;
     for (auto inputSlot : layer->GetInputSlots())
@@ -85,11 +78,10 @@
         outputs.push_back(&outputSlot.GetTensorInfo());
     }
 
-    TosaSerializationBasicBlock* basicBlock = GetTosaMapping(layer->GetType(),
+    TosaSerializationBasicBlock* basicBlock = GetTosaMapping(layer,
+                                                             layer->GetType(),
                                                              inputs,
                                                              outputs,
-                                                             layer->GetParameters(),
-                                                             isMain);
-    SetBasicBlockConstantTensorData(layer, basicBlock);
+                                                             layer->GetParameters());
     return basicBlock;
 }
diff --git a/src/backends/tosaCommon/TosaMappings.hpp b/src/backends/tosaCommon/TosaMappings.hpp
index 8df2493..cc41f1b 100644
--- a/src/backends/tosaCommon/TosaMappings.hpp
+++ b/src/backends/tosaCommon/TosaMappings.hpp
@@ -13,22 +13,18 @@
 using namespace armnn;
 using namespace tosa;
 
-// From the input armnn::Layer, set the corresponding data field in the
-// tosa::TosaSerializationTensor where constant tensor data exists in the armnn::Layer.
-void SetBasicBlockConstantTensorData(Layer* layer, TosaSerializationBasicBlock* /*basicBlock*/);
-
 // Populates a tosa::TosaSerializationBasicBlock from constructing
 // tosa::TosaSerializationOperator(s) and tosa::TosaSerializationTensor(s)
 // based on the input armnn::LayerType and associated armnn::TensorInfos and armnn::Descriptor.
 //
-// If an armnn::LayerType does not have a tosa mapping or the mapping is not implemented in ArmNN,
+// If an armnn::LayerType does not have a TOSA mapping or the mapping is not implemented in ArmNN,
 // an empty tosa::TosaSerializationBasicBlock() is returned with operator tosa::Op_UNKNOWN.
-TosaSerializationBasicBlock* GetTosaMapping(const LayerType type,
+TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
+                                            const LayerType type,
                                             const std::vector<const TensorInfo*>& inputs,
                                             const std::vector<const TensorInfo*>& outputs,
-                                            const BaseDescriptor& /*descriptor*/,
-                                            bool isMain);
+                                            const BaseDescriptor& /*descriptor*/);
 
 // Function called in armnn::OptimizeSubgraphView() when access to armnn::Layer is available
-// and there is an option to set tosa basic block data from constant layer tenors available from the input layer.
-TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer, bool isMain);
+// and there is an option to set TOSA basic block data from constant layer tensors available from the input layer.
+TosaSerializationBasicBlock* GetTosaMappingFromLayer(Layer* layer);
diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
index 7967977..66ca869 100644
--- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
@@ -5,28 +5,36 @@
 
 #include "AdditionOperator.hpp"
 
-TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const std::vector<const TensorInfo*>& inputs,
-                                                           const std::vector<const TensorInfo*>& outputs,
-                                                           bool isMain)
+TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const Layer* layer,
+                                                           const std::vector<const TensorInfo*>& inputs,
+                                                           const std::vector<const TensorInfo*>& outputs)
 {
-    // A helper function with static global variables ensures uniqueness
-    // for dynamically generating input, output and block names
-    std::string input0Name = std::string("Op_ADD_input0_")  + GetUniqueTosaMappingID();
-    std::string input1Name = std::string("Op_ADD_input1_")  + GetUniqueTosaMappingID();
-    std::string outputName = std::string("Op_ADD_output0_") + GetUniqueTosaMappingID();
-    std::string blockName  = std::string("Op_ADD_block_")   + GetUniqueTosaMappingID();
+    std::string input0Name = std::string("input0_");
+    std::string input1Name = std::string("input1_");
+    std::string outputName = std::string("output0_");
+    std::string blockName  = std::string("Op_ADD_block_") + GetUniqueTosaMappingID();
 
-    // If it's the first block, overwrite block name with main.
-    if (isMain)
+    // If a layer is present then the block will be used for execution, so input and output names need to be determined
+    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+    if(layer != nullptr)
     {
-        blockName = std::string("main");
+        // Get the layers connected to the input slots and determine unique layer names.
+        Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+        input0Name = GenerateUniqueName(connectedLayer0, 0);
+
+        Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer();
+        input1Name = GenerateUniqueName(connectedLayer1, 1);
+
+        // Get the layer connected to the output slot and determine unique layer name.
+        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
+        outputName = GenerateUniqueName(connectedOutputLayer, 0);
     }
 
-    TosaSerializationOperator* op = new TosaSerializationOperator(Op_ADD,
-                                                                  Attribute_NONE,
-                                                                  nullptr,
-                                                                  {input0Name, input1Name},
-                                                                  {outputName});
+    auto* op = new TosaSerializationOperator(Op_ADD,
+                                             Attribute_NONE,
+                                             nullptr,
+                                             {input0Name, input1Name},
+                                             {outputName});
 
     std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
     DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
@@ -37,9 +45,9 @@
     std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
     DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
 
-    TosaSerializationTensor* inputTensor0  = new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {});
-    TosaSerializationTensor* inputTensor1  = new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {});
-    TosaSerializationTensor* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
+    auto* inputTensor0  = new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {});
+    auto* inputTensor1  = new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {});
+    auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp
index f467bb6..5eb7441 100644
--- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp
+++ b/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp
@@ -5,15 +5,16 @@
 
 #pragma once
 
+#include "TosaOperatorUtils.hpp"
+
 #include <Layer.hpp>
 
 #include <tosa_serialization_handler.h>
-#include "TosaOperatorUtils.hpp"
 
 using namespace armnn;
 using namespace tosa;
 
-TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const std::vector<const TensorInfo*>& inputs,
-                                                           const std::vector<const TensorInfo*>& outputs,
-                                                           bool isMain);
+TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const Layer* layer,
+                                                           const std::vector<const TensorInfo*>& inputs,
+                                                           const std::vector<const TensorInfo*>& outputs);
 
diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
index b3d2687..2601a62 100644
--- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
@@ -5,23 +5,27 @@
 
 #include "Pooling2DOperator.hpp"
 
-TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const std::vector<const TensorInfo*>& inputs,
+TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const Layer* layer,
+                                                                       const std::vector<const TensorInfo*>& inputs,
                                                                        const std::vector<const TensorInfo*>& outputs,
-                                                                       bool isMain,
                                                                        const Pooling2dDescriptor* poolDescriptor)
 {
+    std::string padInputName   = std::string("input0_");
+    std::string padOutputName  = std::string("intermediate0_") + GetUniqueTosaMappingID();
+    std::string poolOutputName = std::string("output0_");
+    std::string blockName      = std::string("Op_AVG_POOL2D_block_") + GetUniqueTosaMappingID();
 
-    // A helper function with static global variables ensures uniqueness
-    // for dynamically generating input, output and block names
-    std::string padInputName   = std::string("Op_PAD_input0_")  + GetUniqueTosaMappingID();
-    std::string padOutputName  = std::string("Op_PAD_intermediate0_")  + GetUniqueTosaMappingID();
-    std::string poolOutputName = std::string("Op_AVG_POOL2D_output0_") + GetUniqueTosaMappingID();
-    std::string blockName      = std::string("Op_AVG_POOL2D_block_")   + GetUniqueTosaMappingID();
-
-    // If it's the first block, overwrite block name with main.
-    if (isMain)
+    // If a layer is present then the block will be used for execution, so input and output names need to be determined
+    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+    if(layer != nullptr)
     {
-        blockName = std::string("main");
+        // Get the layers connected to the input slots and determine unique layer names.
+        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+        padInputName = GenerateUniqueName(connectedInputLayer, 0);
+
+        // Get the layer connected to the output slot and determine unique layer name.
+        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
+        poolOutputName = GenerateUniqueName(connectedOutputLayer, 0);
     }
 
     std::vector<int> paddings;
@@ -51,11 +55,11 @@
     }
 
     TosaPadAttribute padAttribute(paddings, 0, 0.0f);
-    TosaSerializationOperator* opPad = new TosaSerializationOperator(Op_PAD,
-                                                                     Attribute_PadAttribute,
-                                                                     &padAttribute,
-                                                                     {padInputName},
-                                                                     {padOutputName});
+    auto* opPad = new TosaSerializationOperator(Op_PAD,
+                                                Attribute_PadAttribute,
+                                                &padAttribute,
+                                                {padInputName},
+                                                {padOutputName});
 
     std::vector<int> pad    = {0, 0, 0, 0};
     std::vector<int> kernel = {static_cast<int>(poolDescriptor->m_PoolHeight),
@@ -64,11 +68,11 @@
                                static_cast<int>(poolDescriptor->m_StrideX)};
     TosaPoolAttribute poolAttribute(pad, kernel, stride, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));
 
-    TosaSerializationOperator* opPool = new TosaSerializationOperator(Op_AVG_POOL2D,
-                                                                      Attribute_PoolAttribute,
-                                                                      &poolAttribute,
-                                                                      {padOutputName},
-                                                                      {poolOutputName});
+    auto* opPool = new TosaSerializationOperator(Op_AVG_POOL2D,
+                                                 Attribute_PoolAttribute,
+                                                 &poolAttribute,
+                                                 {padOutputName},
+                                                 {poolOutputName});
 
     std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
     DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
@@ -92,10 +96,9 @@
                              inputShape[3] + paddings[6] + paddings[7]};
     }
 
-    TosaSerializationTensor* inputTensor  = new TosaSerializationTensor(padInputName, inputShape, inputDType, {});
-    TosaSerializationTensor* intermediateTensor  = new TosaSerializationTensor(
-        padOutputName, intermediateShape, inputDType, {});
-    TosaSerializationTensor* outputTensor = new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {});
+    auto* inputTensor        = new TosaSerializationTensor(padInputName, inputShape, inputDType, {});
+    auto* intermediateTensor = new TosaSerializationTensor(padOutputName, intermediateShape, inputDType, {});
+    auto* outputTensor       = new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {});
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.hpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.hpp
index 63ae190..f9d0975 100644
--- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.hpp
+++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.hpp
@@ -5,15 +5,16 @@
 
 #pragma once
 
+#include "TosaOperatorUtils.hpp"
+
 #include <Layer.hpp>
 
 #include <tosa_serialization_handler.h>
-#include "TosaOperatorUtils.hpp"
 
 using namespace armnn;
 using namespace tosa;
 
-TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const std::vector<const TensorInfo*>& inputs,
+TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const Layer* layer,
+                                                                       const std::vector<const TensorInfo*>& inputs,
                                                                        const std::vector<const TensorInfo*>& outputs,
-                                                                       bool isMain,
                                                                        const Pooling2dDescriptor* poolDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index 7fdc922..b256edd 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -8,6 +8,10 @@
         AdditionOperator.cpp
         AvgPool2DIgnoreValueOperator.hpp
         AvgPool2DIgnoreValueOperator.cpp
+        ConstantOperator.hpp
+        ConstantOperator.cpp
+        Conv2dOperator.hpp
+        Conv2dOperator.cpp
         Pooling2DOperator.hpp
         Pooling2DOperator.cpp
         TosaOperatorUtils.hpp
diff --git a/src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp b/src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp
new file mode 100644
index 0000000..5e3973f
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ConstantOperator.cpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConstantOperator.hpp"
+
+#include <layers/ConstantLayer.hpp>
+
+TosaSerializationBasicBlock* ConvertConstantToTosaOperator(const Layer* layer,
+                                                           const std::vector<const TensorInfo*>& outputs)
+{
+    std::string outputName = std::string("constant_");
+    std::string blockName  = std::string("Op_CONST_block_") + GetUniqueTosaMappingID();
+
+    std::vector<uint8_t> uint8Data;
+
+    // If a layer is present then the block will be used for execution, so names need to be unique.
+    // Also, set constant tensor data.
+    if(layer != nullptr)
+    {
+        outputName.append(std::to_string(layer->GetGuid()));
+        blockName.append(std::to_string(layer->GetGuid()));
+
+        auto constantLayer = PolymorphicDowncast<const armnn::ConstantLayer*>(layer);
+
+        uint8Data = ConvertConstantTensorDataToBuffer(constantLayer->m_LayerOutput);
+    }
+
+    auto* op = new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {outputName});
+
+    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+    // Setup output tensor with constant tensor data if available.
+    auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, uint8Data);
+
+    return new TosaSerializationBasicBlock(blockName,       // name
+                                           {op},            // operators
+                                           {outputTensor0}, // tensors
+                                           {},              // inputs
+                                           {outputName});   // outputs
+}
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp b/src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp
new file mode 100644
index 0000000..df158ac
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ConstantOperator.hpp
@@ -0,0 +1,19 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertConstantToTosaOperator(const Layer* layer,
+                                                           const std::vector<const TensorInfo*>& outputs);
+
diff --git a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
new file mode 100644
index 0000000..9c095d6
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
@@ -0,0 +1,123 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Conv2dOperator.hpp"
+
+TosaSerializationBasicBlock* ConvertConv2dToTosaOperator(const Layer* layer,
+                                                         const std::vector<const TensorInfo*>& inputs,
+                                                         const std::vector<const TensorInfo*>& outputs,
+                                                         const Convolution2dDescriptor* conv2dDescriptor)
+{
+    std::vector<std::string> inputNames;
+    std::string outputName = std::string("output0_");
+    std::string blockName  = std::string("Op_CONV2D_block_") + GetUniqueTosaMappingID();
+
+    // Set input names for validation purposes only.
+    if(layer == nullptr)
+    {
+        inputNames.emplace_back("input0_");
+        inputNames.emplace_back("input1_");
+        if(conv2dDescriptor->m_BiasEnabled)
+        {
+            inputNames.emplace_back("input2_");
+        }
+    }
+    else
+    {
+        // If a layer is present then the block will be used for execution, so input and output names need to be
+        // determined using the previous and following layers so the graph is connected correctly.
+        // For validation this doesn't matter.
+        for (uint32_t i = 0; i < inputs.size(); ++i)
+        {
+            // Get the layer connected to the input slot and determine unique layer name.
+            Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
+
+            std::string inputName = GenerateUniqueName(connectedLayer, i);
+            inputNames.push_back(inputName);
+        }
+
+        // Get the layer connected to the output slot and determine unique layer name.
+        Layer& connectedLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
+
+        outputName = GenerateUniqueName(connectedLayer, 0);
+    }
+
+    std::vector<TosaSerializationTensor*> tensors;
+    std::vector<TosaSerializationOperator*> operators;
+
+    // Setup input Tensor
+    std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+    DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+
+    tensors.push_back(new TosaSerializationTensor(inputNames[0], inputShape0, inputDType0, {}));
+
+    // Only add input tensors if weights and bias are not constant or if running validation.
+    // Constant tensors will be created in the ConvertConstantToTosaOperator function.
+    if(!inputs[1]->IsConstant() || layer == nullptr)
+    {
+        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+
+        tensors.push_back(new TosaSerializationTensor(inputNames[1], inputShape1, inputDType1, {}));
+    }
+
+    if(conv2dDescriptor->m_BiasEnabled)
+    {
+        if(!inputs[2]->IsConstant() || layer == nullptr)
+        {
+            std::vector<int32_t> inputShape2 = GetTosaTensorShape(inputs[2]->GetShape());
+            DType inputDType2 = ArmNNToDType(inputs[2]->GetDataType());
+
+            tensors.push_back(new TosaSerializationTensor(inputNames[2], inputShape2, inputDType2, {}));
+        }
+    }
+    else
+    {
+        // If bias is disabled, create a constant bias of 0 as three inputs are required.
+        std::string constantName = std::string("constant_") + GetUniqueTosaMappingID();
+
+        operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {constantName}));
+
+        std::vector<uint8_t> uint8Data;
+        std::vector<float> data = { 0.0 };
+
+        TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
+
+        tensors.push_back(new TosaSerializationTensor(constantName, {1}, DType_FP32, uint8Data));
+        inputNames.emplace_back(constantName);
+    }
+
+    // Setup Output Tensor
+    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // Set up CONV2D operator
+    std::vector<int> pad = {static_cast<int>(conv2dDescriptor->m_PadTop),
+                            static_cast<int>(conv2dDescriptor->m_PadBottom),
+                            static_cast<int>(conv2dDescriptor->m_PadLeft),
+                            static_cast<int>(conv2dDescriptor->m_PadRight)};
+    std::vector<int> stride = {static_cast<int>(conv2dDescriptor->m_StrideY),
+                               static_cast<int>(conv2dDescriptor->m_StrideX)};
+    std::vector<int> dilation = {static_cast<int>(conv2dDescriptor->m_DilationY),
+                                 static_cast<int>(conv2dDescriptor->m_DilationX)};
+    TosaConvAttribute attribute(pad, dilation, stride, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));
+
+    auto* op = new TosaSerializationOperator(Op_CONV2D,
+                                             Attribute_ConvAttribute,
+                                             &attribute,
+                                             inputNames,
+                                             {outputName});
+    operators.push_back(op);
+
+    // operatorInputNames/operatorOutputNames ends up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
+    return new TosaSerializationBasicBlock(blockName,     // name
+                                           operators,     // operators
+                                           tensors,       // tensors
+                                           inputNames,    // inputs
+                                           {outputName}); // outputs
+}
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.hpp b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.hpp
new file mode 100644
index 0000000..909151b
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertConv2dToTosaOperator(const Layer* layer,
+                                                         const std::vector<const TensorInfo*>& inputs,
+                                                         const std::vector<const TensorInfo*>& outputs,
+                                                         const Convolution2dDescriptor* conv2dDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
index cd707ed..eaeb8a4 100644
--- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
@@ -5,24 +5,29 @@
 
 #include "Pooling2DOperator.hpp"
 
-TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const std::vector<const TensorInfo*>& inputs,
+TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const Layer* layer,
+                                                            const std::vector<const TensorInfo*>& inputs,
                                                             const std::vector<const TensorInfo*>& outputs,
-                                                            bool isMain,
                                                             const Pooling2dDescriptor* poolDescriptor)
 {
     std::string poolType = (poolDescriptor->m_PoolType == PoolingAlgorithm::Max) ? "Op_MAX" : "Op_AVG";
     Op opcode = (poolDescriptor->m_PoolType == PoolingAlgorithm::Max) ? Op_MAX_POOL2D : Op_AVG_POOL2D;
 
-    // A helper function with static global variables ensures uniqueness
-    // for dynamically generating input, output and block names
-    std::string input0Name = poolType + std::string("_POOL2D_input0_")  + GetUniqueTosaMappingID();
-    std::string outputName = poolType + std::string("_POOL2D_output0_") + GetUniqueTosaMappingID();
-    std::string blockName  = poolType + std::string("_POOL2D_block_")   + GetUniqueTosaMappingID();
+    std::string input0Name = std::string("input0_");
+    std::string outputName = std::string("output0_");
+    std::string blockName  = std::string("Op_") + poolType + std::string("_POOL2D_block_") + GetUniqueTosaMappingID();
 
-    // If it's the first block, overwrite block name with main.
-    if (isMain)
+    // If a layer is present then the block will be used for execution, so input and output names need to be determined
+    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+    if(layer != nullptr)
     {
-        blockName = std::string("main");
+        // Get the layers connected to the input slots and determine unique layer names.
+        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+        input0Name = GenerateUniqueName(connectedInputLayer, 0);
+
+        // Get the layer connected to the output slot and determine unique layer name.
+        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
+        outputName = GenerateUniqueName(connectedOutputLayer, 0);
     }
 
     std::vector<int> pad = {static_cast<int>(poolDescriptor->m_PadTop),
@@ -35,11 +40,11 @@
                                static_cast<int>(poolDescriptor->m_StrideX)};
     TosaPoolAttribute attribute(pad, kernel, stride, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));
 
-    TosaSerializationOperator* op = new TosaSerializationOperator(opcode,
-                                                                  Attribute_PoolAttribute,
-                                                                  &attribute,
-                                                                  {input0Name},
-                                                                  {outputName});
+    auto* op = new TosaSerializationOperator(opcode,
+                                             Attribute_PoolAttribute,
+                                             &attribute,
+                                             {input0Name},
+                                             {outputName});
 
     std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
     DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
@@ -47,8 +52,8 @@
     std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
     DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
 
-    TosaSerializationTensor* inputTensor0  = new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {});
-    TosaSerializationTensor* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
+    auto* inputTensor0  = new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {});
+    auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
 
     // operatorInputNames/operatorOutputNames ends up being the same as
     // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.hpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.hpp
index 22d2a3a..cc9ec09 100644
--- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.hpp
+++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.hpp
@@ -5,15 +5,16 @@
 
 #pragma once
 
+#include "TosaOperatorUtils.hpp"
+
 #include <Layer.hpp>
 
 #include <tosa_serialization_handler.h>
-#include "TosaOperatorUtils.hpp"
 
 using namespace armnn;
 using namespace tosa;
 
-TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const std::vector<const TensorInfo*>& inputs,
+TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const Layer* layer,
+                                                            const std::vector<const TensorInfo*>& inputs,
                                                             const std::vector<const TensorInfo*>& outputs,
-                                                            bool isMain,
                                                             const Pooling2dDescriptor* poolDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index 00b5f0f..513db0c 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -6,5 +6,7 @@
 #pragma once
 
 #include "AdditionOperator.hpp"
+#include "ConstantOperator.hpp"
+#include "Conv2dOperator.hpp"
 #include "AvgPool2DIgnoreValueOperator.hpp"
 #include "Pooling2DOperator.hpp"
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index f51b210..176e4e1 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -5,10 +5,13 @@
 
 #pragma once
 
+#include <Layer.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/Types.hpp>
 
-#include <tosa_generated.h>
+#include "common/include/ProfilingGuid.hpp"
+
+#include <tosa_serialization_handler.h>
 
 using namespace armnn;
 using namespace tosa;
@@ -53,6 +56,33 @@
     return returnShape;
 }
 
+// Function that generates a unique name using the layer type, input slot and layer GUID.
+inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
+{
+    std::string name;
+    std::string guid        = std::to_string(layer.GetGuid());
+    std::string slotAndGuid = std::to_string(layerSlot) + "_" + guid;
+    LayerType layerType = layer.GetType();
+
+    if (layerType == LayerType::Input)
+    {
+        name = "input" + slotAndGuid;
+    }
+    else if (layerType == LayerType::Output)
+    {
+        name = "output" + slotAndGuid;
+    }
+    else if (layerType == LayerType::Constant)
+    {
+        name = "constant_" + guid;
+    }
+    else
+    {
+        name = "intermediate" + slotAndGuid;
+    }
+    return name;
+}
+
 // Function to return unique int as a string to ensure uniqueness between all input, output and block names.
 static int uniqueTosaMappingID = 0;
 inline std::string GetUniqueTosaMappingID()
@@ -206,3 +236,72 @@
     }
     return "";
 }
+
+inline std::vector<uint8_t> ConvertConstantTensorDataToBuffer(const std::shared_ptr<ConstTensorHandle>& tensorHandle)
+{
+    tosa_err_t error;
+    std::vector<uint8_t> uint8Data;
+    auto tensorInfo = tensorHandle->GetTensorInfo();
+
+    switch (tensorInfo.GetDataType())
+    {
+        case DataType::Float32:
+        {
+            std::vector<float> data(tensorInfo.GetNumElements());
+            memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
+
+            error = TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
+            break;
+        }
+        case DataType::Float16:
+        {
+            std::vector<float> data(tensorInfo.GetNumElements());
+            memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
+
+            error = TosaSerializationHandler::ConvertF16toU8(data, uint8Data);
+            break;
+        }
+        case DataType::QSymmS8:
+        case DataType::QAsymmS8:
+        {
+            std::vector<int8_t> data(tensorInfo.GetNumElements());
+            memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
+
+            error = TosaSerializationHandler::ConvertI8toU8(data, uint8Data);
+            break;
+        }
+        case DataType::QAsymmU8:
+        {
+            // QAsymmU8 data is already uint8, so copy it directly into a correctly sized buffer.
+            uint8Data.resize(tensorInfo.GetNumBytes());
+            memcpy(uint8Data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
+            error = tosa_err_t::TOSA_OK;
+            break;
+        }
+        case DataType::QSymmS16:
+        {
+            std::vector<int16_t> data(tensorInfo.GetNumElements());
+            memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
+
+            error = TosaSerializationHandler::ConvertI16toU8(data, uint8Data);
+            break;
+        }
+        case DataType::Signed32:
+        {
+            std::vector<int32_t> data(tensorInfo.GetNumElements());
+            memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
+
+            error = TosaSerializationHandler::ConvertI32toU8(data, uint8Data);
+            break;
+        }
+        default:
+        {
+            throw armnn::Exception("SetConstantTensorData: An unsupported data type was encountered.");
+        }
+    }
+
+    if(error != tosa_err_t::TOSA_OK)
+    {
+        throw armnn::Exception("SetConstantTensorData: An error occurred when converting constant data");
+    }
+
+    tensorHandle->Unmap();
+    return uint8Data;
+}
diff --git a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
index 8869b3a..a38f66b 100644
--- a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
+++ b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
@@ -17,10 +17,8 @@
 {
     uint32_t numInputs = static_cast<uint32_t>(inputShape.size());
     uint32_t numOutputs = static_cast<uint32_t>(outputShape.size());
-    std::string operatorString0 = TosaOpToString(Op_PAD);
-    std::string operatorString1 = TosaOpToString(Op_AVG_POOL2D);
 
-    std::string blockStr = operatorString1 + "_block_";
+    std::string blockStr = TosaOpToString(Op_AVG_POOL2D) + "_block_";
     CHECK(basicBlock->GetName().find(blockStr)  != std::string::npos);
     CHECK(basicBlock->GetInputs().size() == numInputs);
     CHECK(basicBlock->GetOutputs().size() == numOutputs);
@@ -41,7 +39,7 @@
         std::basic_string<char> blockInputName = basicBlock->GetInputs()[i];
         std::basic_string<char> operatorInputName  = padOp->GetInputTensorNames()[i];
 
-        std::string opStr = operatorString0 + "_input" + std::to_string(i) + "_";
+        std::string opStr = "input" + std::to_string(i) + "_";
 
         CHECK(blockInputName == operatorInputName);
         CHECK(basicBlock->GetTensorByName(blockInputName));
@@ -56,7 +54,7 @@
     for (uint32_t i = 0; i < padOpOutputs; i++)
     {
         std::basic_string<char> operatorOutputName  = padOp->GetOutputTensorNames()[i];
-        std::string opStr = operatorString0 + "_intermediate" + std::to_string(i) + "_";
+        std::string opStr = "intermediate" + std::to_string(i) + "_";
 
         CHECK(basicBlock->GetTensorByName(operatorOutputName));
         CHECK(operatorOutputName.find(opStr)  != std::string::npos);
@@ -86,7 +84,7 @@
     for (uint32_t i = 0; i < poolOpInputs; i++)
     {
         std::basic_string<char> operatorInputName  = poolOp->GetInputTensorNames()[i];
-        std::string opStr = operatorString0 + "_intermediate" + std::to_string(i) + "_";
+        std::string opStr = "intermediate" + std::to_string(i) + "_";
 
         CHECK(basicBlock->GetTensorByName(operatorInputName));
         CHECK(operatorInputName.find(opStr)  != std::string::npos);
@@ -102,7 +100,7 @@
         std::basic_string<char> blockOutputName = basicBlock->GetOutputs()[i];
         std::basic_string<char> operatorOutputName  = poolOp->GetOutputTensorNames()[i];
 
-        std::string opStr = operatorString1 + "_output" + std::to_string(i) + "_";
+        std::string opStr = "output" + std::to_string(i) + "_";
 
         CHECK(blockOutputName == operatorOutputName);
         CHECK(basicBlock->GetTensorByName(blockOutputName));
diff --git a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
index 98fd563..dd61ba8 100644
--- a/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToManyMappingTests.cpp
@@ -30,7 +30,7 @@
     std::vector<std::vector<int32_t>> outputShape        = {{ 1, 1, 3, 3 }};
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMapping(LayerType::Pooling2d, {&inputTensorInfo}, {&outputTensorInfo}, descriptor, false);
+        GetTosaMapping(nullptr, LayerType::Pooling2d, {&inputTensorInfo}, {&outputTensorInfo}, descriptor);
     VerifyAvgPool2DIgnoreValue(basicBlock,
                                inputShape,
                                outputShape,
@@ -74,7 +74,7 @@
     pool->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool), false);
+        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool));
     VerifyAvgPool2DIgnoreValue(basicBlock,
                               inputShape,
                               outputShape,
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index 04d1eb4..af9f9e2 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -4,6 +4,7 @@
 //
 
 #include "TosaTestUtils.hpp"
+#include "CommonTestUtils.hpp"
 
 using namespace armnn;
 using namespace tosa;
@@ -18,7 +19,7 @@
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 2, 4, 2 }};
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMapping(LayerType::Addition, {&info, &info}, {&info}, BaseDescriptor(), false);
+        GetTosaMapping(nullptr, LayerType::Addition, {&info, &info}, {&info}, BaseDescriptor());
     AssertTosaOneToOneMappingBasicBlock(
         basicBlock, inputShape, outputShape, Op_ADD, Attribute_NONE, BaseDescriptor(), LayerType::Addition);
 }
@@ -50,11 +51,132 @@
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 2, 4, 2 }};
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(add), false);
+        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(add));
     AssertTosaOneToOneMappingBasicBlock(
         basicBlock, inputShape, outputShape, Op_ADD, Attribute_NONE, BaseDescriptor(), LayerType::Addition);
 }
 
+TEST_CASE("GetTosaMapping_ConstantLayer")
+{
+    TensorInfo outputInfo = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32, 0.0f, 0, true);
+    std::vector<std::vector<int32_t>> outputShape = {{ 1, 2, 4, 2 }};
+
+    TosaSerializationBasicBlock* basicBlock =
+            GetTosaMapping(nullptr, LayerType::Constant, {}, {&outputInfo}, BaseDescriptor());
+    AssertTosaOneToOneMappingBasicBlock(
+            basicBlock, {}, outputShape, Op_CONST, Attribute_NONE, BaseDescriptor(), LayerType::Constant);
+}
+
+TEST_CASE("GetTosaMappingFromLayer_ConstantLayer")
+{
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    TensorInfo info = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32, 0.0f, 0, true);
+    std::vector<std::vector<int32_t>> outputShape = {{ 1, 2, 4, 2 }};
+
+    std::vector<float> data = GenerateRandomData<float>(info.GetNumElements());
+    armnn::ConstTensor constTensor(info, data);
+
+    IConnectableLayer* constant = net->AddConstantLayer(constTensor, "constant");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    constant->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    constant->GetOutputSlot(0).SetTensorInfo(info);
+
+    TosaSerializationBasicBlock* basicBlock =
+            GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(constant));
+    AssertTosaOneToOneMappingBasicBlock(
+            basicBlock, {}, outputShape, Op_CONST, Attribute_NONE, BaseDescriptor(), LayerType::Constant);
+}
+
+TEST_CASE("GetTosaMapping_Conv2dLayer")
+{
+    armnn::Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 1;
+    descriptor.m_PadRight    = 1;
+    descriptor.m_PadTop      = 1;
+    descriptor.m_PadBottom   = 1;
+    descriptor.m_StrideX     = 2;
+    descriptor.m_StrideY     = 2;
+    descriptor.m_DilationX   = 2;
+    descriptor.m_DilationY   = 2;
+    descriptor.m_BiasEnabled = true;
+
+    const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+
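+    // The weights and bias infos are passed as extra inputs, so their shapes are included here.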
+    std::vector<std::vector<int32_t>> inputShape  = {{ 1, 5, 5, 1 }, { 1, 3, 3, 1 }, { 1 }};
+    std::vector<std::vector<int32_t>> outputShape = {{ 1, 3, 3, 1 }};
+
+    TosaSerializationBasicBlock* basicBlock = GetTosaMapping(nullptr,
+                                                             LayerType::Convolution2d,
+                                                             {&inputInfo, &weightsInfo, &biasesInfo},
+                                                             {&outputInfo},
+                                                             descriptor);
+    AssertTosaOneToOneMappingBasicBlock(
+        basicBlock, inputShape, outputShape, Op_CONV2D, Attribute_ConvAttribute, descriptor, LayerType::Convolution2d);
+}
+
+TEST_CASE("GetTosaMappingFromLayer_Conv2dLayer")
+{
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    armnn::Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 1;
+    descriptor.m_PadRight    = 1;
+    descriptor.m_PadTop      = 1;
+    descriptor.m_PadBottom   = 1;
+    descriptor.m_StrideX     = 2;
+    descriptor.m_StrideY     = 2;
+    descriptor.m_DilationX   = 2;
+    descriptor.m_DilationY   = 2;
+    descriptor.m_BiasEnabled = true;
+
+    const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+
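+    // Only the input tensor shape is listed; the weights and bias tensors are created in separate constant blocks.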
+    std::vector<std::vector<int32_t>> inputShape  = {{ 1, 5, 5, 1 }};
+    std::vector<std::vector<int32_t>> outputShape = {{ 1, 3, 3, 1 }};
+
+    std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
+    armnn::ConstTensor weights(weightsInfo, weightsData);
+
+    std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
+    armnn::ConstTensor biases(biasesInfo, biasesData);
+
+    armnn::IConnectableLayer* const inputLayer  = net->AddInputLayer(0, "input0");
+    armnn::IConnectableLayer* const weightsLayer = net->AddConstantLayer(weights, "weights");
+    armnn::IConnectableLayer* const biasesLayer = net->AddConstantLayer(biases, "biases");
+    armnn::IConnectableLayer* const convLayer   = net->AddConvolution2dLayer(descriptor, "conv2d");
+    armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
+    biasesLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
+    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
+    biasesLayer->GetOutputSlot(0).SetTensorInfo(biasesInfo);
+    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    TosaSerializationBasicBlock* basicBlock = GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(convLayer));
+    AssertTosaOneToOneMappingBasicBlock(
+        basicBlock, inputShape, outputShape, Op_CONV2D, Attribute_ConvAttribute, descriptor, LayerType::Convolution2d);
+}
+
 TEST_CASE("GetTosaMapping_MaxPool2DLayer")
 {
     armnn::Pooling2dDescriptor descriptor;
@@ -74,7 +196,7 @@
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 1, 3, 3 }};
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMapping(LayerType::Pooling2d, {&inputTensorInfo}, {&outputTensorInfo}, descriptor, false);
+        GetTosaMapping(nullptr, LayerType::Pooling2d, {&inputTensorInfo}, {&outputTensorInfo}, descriptor);
     AssertTosaOneToOneMappingBasicBlock(
         basicBlock, inputShape, outputShape, Op_MAX_POOL2D, Attribute_PoolAttribute, descriptor, LayerType::Pooling2d);
 }
@@ -114,7 +236,7 @@
     pool->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool), false);
+        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool));
     AssertTosaOneToOneMappingBasicBlock(
         basicBlock, inputShape, outputShape, Op_MAX_POOL2D, Attribute_PoolAttribute, descriptor, LayerType::Pooling2d);
 }
@@ -138,7 +260,7 @@
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 1, 3, 3 }};
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMapping(LayerType::Pooling2d, {&inputTensorInfo}, {&outputTensorInfo}, descriptor, false);
+        GetTosaMapping(nullptr, LayerType::Pooling2d, {&inputTensorInfo}, {&outputTensorInfo}, descriptor);
     AssertTosaOneToOneMappingBasicBlock(basicBlock,
                                         inputShape,
                                         outputShape,
@@ -183,7 +305,7 @@
     pool->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool), false);
+        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(pool));
     AssertTosaOneToOneMappingBasicBlock(basicBlock,
                                         inputShape,
                                         outputShape,
@@ -196,7 +318,7 @@
 TEST_CASE("GetTosaMapping_Unimplemented")
 {
     TosaSerializationBasicBlock* basicBlock =
-        GetTosaMapping(LayerType::UnidirectionalSequenceLstm, {}, {}, BaseDescriptor(), false);
+        GetTosaMapping(nullptr, LayerType::UnidirectionalSequenceLstm, {}, {}, BaseDescriptor());
 
     CHECK(basicBlock->GetName() == "");
     CHECK(basicBlock->GetTensors().size() == 0);
diff --git a/src/backends/tosaCommon/test/TosaTestUtils.hpp b/src/backends/tosaCommon/test/TosaTestUtils.hpp
index a362bde..dd63c0e 100644
--- a/src/backends/tosaCommon/test/TosaTestUtils.hpp
+++ b/src/backends/tosaCommon/test/TosaTestUtils.hpp
@@ -21,6 +21,24 @@
 {
     switch (type)
     {
+        case LayerType::Convolution2d:
+        {
+            auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
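+            // TOSA stores pad as {top, bottom, left, right} and dilation/stride as {y, x}.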
+            std::vector<int> pad = {static_cast<int>(conv2dDesc->m_PadTop),
+                                    static_cast<int>(conv2dDesc->m_PadBottom),
+                                    static_cast<int>(conv2dDesc->m_PadLeft),
+                                    static_cast<int>(conv2dDesc->m_PadRight)};
+
+            std::vector<int> dilation = {static_cast<int>(conv2dDesc->m_DilationY),
+                                         static_cast<int>(conv2dDesc->m_DilationX)};
+            std::vector<int> stride = {static_cast<int>(conv2dDesc->m_StrideY),
+                                       static_cast<int>(conv2dDesc->m_StrideX)};
+            TosaConvAttribute convAttribute(attribute);
+            CHECK(pad == convAttribute.pad());
+            CHECK(dilation == convAttribute.dilation());
+            CHECK(stride == convAttribute.stride());
+            break;
+        }
         case LayerType::Pooling2d:
         {
             auto poolDesc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
@@ -80,6 +98,7 @@
             CHECK(pad == poolAttribute.pad());
             CHECK(kernel == poolAttribute.kernel());
             CHECK(stride == poolAttribute.stride());
+            break;
         }
         default:
             break;
@@ -97,18 +116,30 @@
                                                 DType dataType = DType_FP32)
 {
     uint32_t numInputs = static_cast<uint32_t>(inputShape.size());
+    uint32_t numInputTensors = numInputs;
     uint32_t numOutputs = static_cast<uint32_t>(outputShape.size());
     std::string operatorString = TosaOpToString(tosaOp);
 
+    // The number of block inputs can differ from the number of tensors in the block
+    // when there are constant layers, as those are created separately.
+    if(type == LayerType::Convolution2d)
+    {
+        numInputTensors = 2;
+        auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
+        if(conv2dDesc->m_BiasEnabled)
+        {
+            numInputTensors = 3;
+        }
+    }
+
     std::string blockStr = operatorString + "_block_";
     CHECK(basicBlock->GetName().find(blockStr)  != std::string::npos);
-    CHECK(basicBlock->GetInputs().size() == numInputs);
+    CHECK(basicBlock->GetInputs().size() == numInputTensors);
     CHECK(basicBlock->GetOutputs().size() == numOutputs);
     CHECK(basicBlock->GetOperators().size() == 1);
     CHECK(basicBlock->GetTensors().size() == (numInputs + numOutputs));
 
     TosaSerializationOperator* op = basicBlock->GetOperators().at(0);
-    CHECK(op->GetInputTensorNames().size() == numInputs);
+    CHECK(op->GetInputTensorNames().size() == numInputTensors);
     CHECK(op->GetOutputTensorNames().size() == numOutputs);
 
     for (uint32_t i = 0; i < numInputs; i++)
@@ -117,11 +148,11 @@
         std::basic_string<char> operatorInputName  = op->GetInputTensorNames()[i];
         std::basic_string<char> tensorName = basicBlock->GetTensors()[i]->GetName();
 
-        std::string opStr = operatorString + "_input" + std::to_string(i) + "_";
+        std::string opStr = "input" + std::to_string(i) + "_";
 
         CHECK(blockInputName == operatorInputName);
         CHECK(tensorName == operatorInputName);
-        CHECK(blockInputName.find(opStr)  != std::string::npos);
+        CHECK(blockInputName.find(opStr) != std::string::npos);
     }
 
     for (uint32_t i = 0; i < numOutputs; i++)
@@ -130,7 +161,11 @@
         std::basic_string<char> operatorOutputName  = op->GetOutputTensorNames()[i];
         std::basic_string<char> tensorName = basicBlock->GetTensors()[numInputs + i]->GetName();
 
-        std::string opStr = operatorString + "_output" + std::to_string(i) + "_";
+        std::string opStr = "output" + std::to_string(i) + "_";
+        if (tosaOp == Op_CONST)
+        {
+            opStr = "constant_";
+        }
 
         CHECK(blockOutputName == operatorOutputName);
         CHECK(tensorName == operatorOutputName);
@@ -152,8 +187,12 @@
     {
         TosaSerializationTensor* tensor = basicBlock->GetTensors()[i + inputShape.size()];
         CHECK(tensor->GetDtype() == dataType);
-        CHECK(tensor->GetData().size() == 0);
         CHECK(tensor->GetShape() == outputShape[static_cast<unsigned long int>(i)]);
+        if (tosaOp != Op_CONST)
+        {
+            // Const tensors contain data, so only check that non-const tensors are empty.
+            CHECK(tensor->GetData().size() == 0);
+        }
     }
 
     VerifyTosaAttributeFromDescriptor(descriptor,
diff --git a/src/backends/tosaReference/TosaRefBackend.cpp b/src/backends/tosaReference/TosaRefBackend.cpp
index e3a516a..554bb10 100644
--- a/src/backends/tosaReference/TosaRefBackend.cpp
+++ b/src/backends/tosaReference/TosaRefBackend.cpp
@@ -83,16 +83,20 @@
                                                        const ModelOptions& modelOptions) const
 {
     OptimizationViews optimizationViews(modelOptions);
+
     auto handler = std::make_unique<TosaSerializationHandler>();
 
-    // A main block should only be added once.
-    bool isMain = true;
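+    // Operators, tensors and graph input/output names are gathered from each layer mapping
+    // and combined into a single main block after the loop.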
+    std::vector<std::string> graphInputs;
+    std::vector<std::string> graphOutputs;
+
+    std::vector<TosaSerializationOperator*> operators;
+    std::vector<TosaSerializationTensor*> tensors;
 
     auto it = subgraph.endIConnectable();
     while (it != subgraph.beginIConnectable())
     {
         --it;
-        Layer &base = *(PolymorphicDowncast<Layer*>(*it));
+        Layer& base = *(PolymorphicDowncast<Layer*>(*it));
 
         if(base.GetType() == armnn::LayerType::Input ||
            base.GetType() == armnn::LayerType::Output)
@@ -100,15 +104,44 @@
             continue;
         }
 
-        tosa::TosaSerializationBasicBlock* mappings = GetTosaMappingFromLayer(&base, isMain);
-        handler.get()->GetBlocks().push_back(mappings);
+        tosa::TosaSerializationBasicBlock* mappings = GetTosaMappingFromLayer(&base);
 
-        if(isMain)
+        // Loop through the block inputs and save any graph inputs.
+        // A graph input can be identified by "input" appearing in its name.
+        for (uint32_t i = 0; i < mappings->GetInputs().size(); i++)
         {
-            isMain = false;
+            std::basic_string<char> blockInputName = mappings->GetInputs()[i];
+
+            if (blockInputName.find("input") != std::string::npos)
+            {
+                graphInputs.push_back(blockInputName);
+            }
         }
+
+        // Loop through the block outputs and save any graph outputs.
+        // A graph output can be identified by "output" appearing in its name.
+        for (uint32_t i = 0; i < mappings->GetOutputs().size(); i++)
+        {
+            std::basic_string<char> blockOutputName = mappings->GetOutputs()[i];
+
+            if (blockOutputName.find("output") != std::string::npos)
+            {
+                graphOutputs.push_back(blockOutputName);
+            }
+        }
+
+        auto blockOperators = mappings->GetOperators();
+        operators.insert(operators.end(), blockOperators.begin(), blockOperators.end());
+
+        auto blockTensors = mappings->GetTensors();
+        tensors.insert(tensors.end(), blockTensors.begin(), blockTensors.end());
     }
 
+    // Add all mappings to a single block, as the TOSA Reference Model
+    // requires the full graph to be in one block called main.
+    auto* block = new TosaSerializationBasicBlock("main", operators, tensors, graphInputs, graphOutputs);
+
+    handler.get()->GetBlocks().push_back(block);
+
     auto compiledBlob =
             std::make_unique<PreCompiledObjectPtr>(handler.release(), DeleteAsType<TosaSerializationHandler>);
 
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index ce4abbf..848b7ef 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -102,7 +102,7 @@
         std::tuple<DType, DType> mappingType(input->GetDtype(), output->GetDtype());
 
         // Check Dtype from tensor (GetDtype)
-        supported &= CheckSupportRule(TosaContainerContains(mappingType, supportedMappingTypes),
+        supported &= CheckSupportRule(TosaContainerContainsTwoTypes(mappingType, supportedMappingTypes),
                                       reasonIfUnsupported,
                                       std::string("TOSA Reference Operator: " + opString + " for input: " +
                                           input->GetName() + " and output: " + output->GetName() +
@@ -125,6 +125,58 @@
     return supported;
 }
 
+static bool RunTosaLayerChecksInputWeightsOutputDataType(
+        TosaSerializationOperator* op,
+        const std::vector<TosaSerializationTensor*>& inputs,
+        const std::vector<TosaSerializationTensor*>& outputs,
+        const std::vector<Attribute>& supportedAttributes,
+        const std::vector<std::tuple<DType, DType, DType>>& supportedMappingTypes,
+        Optional<string&> reasonIfUnsupported)
+{
+    bool supported = true;
+
+    std::string opString = TosaOpToString(op->GetOp());
+
+    // Check Attribute from operator (GetAttribute)
+    supported &= CheckSupportRule(TosaOperatorAttributeOfAny(op, supportedAttributes), reasonIfUnsupported,
+                                  std::string("TOSA Reference Operator: " + opString +
+                                              " has an unsupported attribute.").c_str());
+
+    // Check the combination of input, weights and output data types.
+    // The bias type matches the output type, so it is covered by this check.
+    std::tuple<DType, DType, DType> mappingTypes(inputs[0]->GetDtype(), inputs[1]->GetDtype(), outputs[0]->GetDtype());
+
+    // Check Dtype from tensor (GetDtype)
+    supported &= CheckSupportRule(TosaContainerContainsThreeTypes(mappingTypes, supportedMappingTypes),
+                                  reasonIfUnsupported,
+                                  std::string("TOSA Reference Operator: " + opString + " for input 0: " +
+                                              inputs[0]->GetName() + ", input 1: " + inputs[1]->GetName() +
+                                              " and output: " + outputs[0]->GetName() +
+                                              " has an unsupported input data type combination.").c_str());
+
+    for (auto input : inputs)
+    {
+        // Check Shape from tensor (GetShape)
+        supported &= CheckSupportRule(TosaTensorNumDimensionsWithinBounds(input),
+                                      reasonIfUnsupported,
+                                      std::string("Tosa Reference Operator: " + opString + " for input: " +
+                                                  input->GetName() + " exceeds MaxNumOfTensorDimensions.").c_str());
+    }
+
+    for (auto output : outputs)
+    {
+        // Check Shape from tensor (GetShape)
+        supported &= CheckSupportRule(TosaTensorNumDimensionsWithinBounds(output),
+                                      reasonIfUnsupported,
+                                      std::string("Tosa Reference Operator: " + opString + " for output: " +
+                                                  output->GetName() + " exceeds MaxNumOfTensorDimensions.").c_str());
+    }
+
+    return supported;
+}
+
 static bool IsTosaLayerSupported(TosaSerializationOperator* op,
                                  const std::vector<TosaSerializationTensor*>& inputs,
                                  const std::vector<TosaSerializationTensor*>& outputs,
@@ -134,10 +186,7 @@
     {
         case tosa::Op_ADD:
         {
-            std::vector<Attribute> supportedAttributes =
-            {
-                Attribute_NONE
-            };
+            std::vector<Attribute> supportedAttributes = { Attribute_NONE };
 
             // Only Int32, Fp32 and Fp16 are currently supported by the TOSA Reference Model.
             std::vector<DType> supportedTypes =
@@ -148,19 +197,46 @@
             };
 
             // Check the attribute, data types and bounds for inputs and outputs.
-            return RunTosaLayerChecksSingleDataType(op,
-                                                    inputs,
-                                                    outputs,
-                                                    supportedAttributes,
-                                                    supportedTypes,
-                                                    reasonIfUnsupported);
+            return RunTosaLayerChecksSingleDataType(
+                    op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported);
+        }
+        case tosa::Op_CONST:
+        {
+            std::vector<Attribute> supportedAttributes = { Attribute_NONE };
+
+            std::vector<DType> supportedTypes =
+            {
+                DType_FP16,
+                DType_FP32,
+                DType_UINT8,
+                DType_INT8,
+                DType_INT16,
+                DType_INT32,
+                DType_BOOL
+            };
+
+            // Check the attribute, data types and bounds for inputs and outputs.
+            return RunTosaLayerChecksSingleDataType(
+                    op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported);
+        }
+        case tosa::Op_CONV2D:
+        {
+            std::vector<Attribute> supportedAttributes = { Attribute_ConvAttribute };
+
+            std::vector<std::tuple<DType, DType, DType>> supportedTypesMapping =
+            {
+                std::tuple<DType, DType, DType>(DType_FP16, DType_FP16, DType_FP16),
+                std::tuple<DType, DType, DType>(DType_FP16, DType_FP16, DType_FP32),
+                std::tuple<DType, DType, DType>(DType_FP32, DType_FP32, DType_FP32),
+                std::tuple<DType, DType, DType>(DType_INT8, DType_INT8, DType_INT32)
+            };
+
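+            // Check the attribute, data types and bounds for inputs and outputs.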
+            return RunTosaLayerChecksInputWeightsOutputDataType(
+                    op, inputs, outputs, supportedAttributes, supportedTypesMapping, reasonIfUnsupported);
         }
         case tosa::Op_AVG_POOL2D:
         {
-            std::vector<Attribute> supportedAttributes =
-            {
-                Attribute_PoolAttribute
-            };
+            std::vector<Attribute> supportedAttributes = { Attribute_PoolAttribute };
 
             std::vector<std::tuple<DType, DType>> supportedTypesMapping =
             {
@@ -172,19 +248,12 @@
             };
 
             // Check the attribute, data types and bounds for inputs and outputs.
-            return RunTosaLayerChecksInputOutputDataType(op,
-                                                         inputs,
-                                                         outputs,
-                                                         supportedAttributes,
-                                                         supportedTypesMapping,
-                                                         reasonIfUnsupported);
+            return RunTosaLayerChecksInputOutputDataType(
+                    op, inputs, outputs, supportedAttributes, supportedTypesMapping, reasonIfUnsupported);
         }
         case tosa::Op_MAX_POOL2D:
         {
-            std::vector<Attribute> supportedAttributes =
-            {
-                Attribute_PoolAttribute
-            };
+            std::vector<Attribute> supportedAttributes = { Attribute_PoolAttribute };
 
             std::vector<DType> supportedTypes =
             {
@@ -195,19 +264,12 @@
             };
 
             // Check the attribute, data types and bounds for inputs and outputs.
-            return RunTosaLayerChecksSingleDataType(op,
-                                                    inputs,
-                                                    outputs,
-                                                    supportedAttributes,
-                                                    supportedTypes,
-                                                    reasonIfUnsupported);
+            return RunTosaLayerChecksSingleDataType(
+                    op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported);
         }
         case tosa::Op_PAD:
         {
-            std::vector<Attribute> supportedAttributes =
-            {
-                Attribute_PadAttribute
-            };
+            std::vector<Attribute> supportedAttributes = { Attribute_PadAttribute };
 
             std::vector<DType> supportedTypes =
             {
@@ -220,12 +282,8 @@
             };
 
             // Check the attribute, data types and bounds for inputs and outputs.
-            return RunTosaLayerChecksSingleDataType(op,
-                                                    inputs,
-                                                    outputs,
-                                                    supportedAttributes,
-                                                    supportedTypes,
-                                                    reasonIfUnsupported);
+            return RunTosaLayerChecksSingleDataType(
+                    op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported);
         }
         default:
             SetValueChecked(reasonIfUnsupported, "Operation is currently unsupported by the TOSA Reference Backend.");
@@ -248,15 +306,31 @@
 
     switch (type)
     {
+        case LayerType::Input:
+        case LayerType::Output:
+            return true;
         case LayerType::Addition:
             // Setup inputs and outputs
             inputInfos.push_back(&infos[0]);
             inputInfos.push_back(&infos[1]);
             outputInfos.push_back(&infos[2]);
             break;
-        case LayerType::Input:
-        case LayerType::Output:
-            return true;
+        case LayerType::Constant:
+            outputInfos.push_back(&infos[0]);
+            break;
+        case LayerType::Convolution2d:
+        {
+            inputInfos.push_back(&infos[0]); // input
+            outputInfos.push_back(&infos[1]); // output
+            inputInfos.push_back(&infos[2]); // weights
+
+            auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
+            if(conv2dDesc->m_BiasEnabled)
+            {
+                inputInfos.push_back(&infos[3]); // bias
+            }
+            break;
+        }
         case LayerType::Pooling2d:
             // Setup inputs and outputs
             inputInfos.push_back(&infos[0]);
@@ -266,7 +340,7 @@
             break;
     }
 
-    auto mappings = GetTosaMapping(type, inputInfos, outputInfos, descriptor, false);
+    auto mappings = GetTosaMapping(nullptr, type, inputInfos, outputInfos, descriptor);
     if (mappings->GetName() == "")
     {
         // There currently isn't a TOSA mapping for this layer, as the default was returned.
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index fbe1265..4245f0d 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -6,6 +6,7 @@
 #include "backendsCommon/test/EndToEndTestImpl.hpp"
 
 #include "backendsCommon/test/AdditionEndToEndTestImpl.hpp"
+#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 
 #include <doctest/doctest.h>
@@ -30,6 +31,17 @@
     AdditionEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
 }
 
+// Conv2d
+TEST_CASE("TosaRefConv2dEndtoEndTestFloat32")
+{
+    Convolution2dEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends, armnn::DataLayout::NHWC);
+}
+
+TEST_CASE("TosaRefConv2dWithoutBiasEndtoEndTestFloat32")
+{
+    Convolution2dEndToEnd<armnn::DataType::Float32>(tosaDefaultBackends, armnn::DataLayout::NHWC, false);
+}
+
 // Max Pool 2D
 TEST_CASE("TosaRefMaxPool2DEndtoEndTestFloat32")
 {
diff --git a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
index 48eca34..e6fbbf9 100644
--- a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
@@ -58,11 +58,98 @@
 
     CHECK(!supported);
     REQUIRE(reasonIfNotSupported.find(
-        "TOSA Reference Operator: Op_ADD for input: Op_ADD_input0_") != std::string::npos);
+        "TOSA Reference Operator: Op_ADD for input: input0_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
-        "TOSA Reference Operator: Op_ADD for input: Op_ADD_input1_") != std::string::npos);
+        "TOSA Reference Operator: Op_ADD for input: input1_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
-        "TOSA Reference Operator: Op_ADD for output: Op_ADD_output0_") != std::string::npos);
+        "TOSA Reference Operator: Op_ADD for output: output0_") != std::string::npos);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceConstant")
+{
+    armnn::TensorInfo outputInfo({1,1,3,4}, armnn::DataType::Float32);
+
+    armnn::TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Constant,
+                                                     {outputInfo},
+                                                     armnn::BaseDescriptor(),
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceConstantUnsupported")
+{
+    armnn::TensorInfo outputInfo({1,1,3,4}, armnn::DataType::Signed64);
+
+    armnn::TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Constant,
+                                                     {outputInfo},
+                                                     armnn::BaseDescriptor(),
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(!supported);
+    REQUIRE(reasonIfNotSupported.find(
+            "TOSA Reference Operator: Op_CONST for output: constant_") != std::string::npos);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceConv2d")
+{
+    armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+
+    armnn::Convolution2dDescriptor desc;
+    desc.m_BiasEnabled = true;
+
+    armnn::TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d,
+                                                     {inputInfo, outputInfo, weightsInfo, biasesInfo},
+                                                     desc,
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceConv2dUnsupported")
+{
+    // If inputs and weights are Fp32, output must match.
+    armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Signed64);
+    armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+    armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+
+    armnn::Convolution2dDescriptor desc;
+    desc.m_BiasEnabled = true;
+
+    armnn::TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d,
+                                                     {inputInfo, outputInfo, weightsInfo, biasesInfo},
+                                                     desc,
+                                                     armnn::EmptyOptional(),
+                                                     armnn::EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(!supported);
+    REQUIRE(reasonIfNotSupported.find(
+            "TOSA Reference Operator: Op_CONV2D for input 0: input0_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+            "input 1: input1_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+            "and output: output0_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+            "has an unsupported input data type combination.") != std::string::npos);
 }
 
 TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2d")
@@ -150,9 +237,9 @@
 
     CHECK(!supported);
     REQUIRE(reasonIfNotSupported.find(
-        "TOSA Reference Operator: Op_MAX_POOL2D for input: Op_MAX_POOL2D_input0_") != std::string::npos);
+        "TOSA Reference Operator: Op_MAX_POOL2D for input: input0_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
-        "TOSA Reference Operator: Op_MAX_POOL2D for output: Op_MAX_POOL2D_output0_") != std::string::npos);
+        "TOSA Reference Operator: Op_MAX_POOL2D for output: output0_") != std::string::npos);
 }
 
 TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2dUnsupported_InputOutputDatatypeDifferent")
@@ -177,9 +264,9 @@
 
     CHECK(!supported);
     REQUIRE(reasonIfNotSupported.find(
-        "TOSA Reference Operator: Op_AVG_POOL2D for input: Op_PAD_intermediate0_") != std::string::npos);
+        "TOSA Reference Operator: Op_AVG_POOL2D for input: intermediate0_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
-        " and output: Op_AVG_POOL2D_output0_") != std::string::npos);
+        " and output: output0_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
         " has an unsupported input data type: 8 to output data type: 10") != std::string::npos);
 }
diff --git a/src/backends/tosaReference/workloads/TosaRefPreCompiledWorkload.cpp b/src/backends/tosaReference/workloads/TosaRefPreCompiledWorkload.cpp
index ffdbf6f..ba353a3 100644
--- a/src/backends/tosaReference/workloads/TosaRefPreCompiledWorkload.cpp
+++ b/src/backends/tosaReference/workloads/TosaRefPreCompiledWorkload.cpp
@@ -23,13 +23,10 @@
 
 void TosaRefPreCompiledWorkload::Execute() const
 {
-    uint32_t numInputBuffers  = static_cast<uint32_t>(m_Data.m_Inputs.size());
-    uint32_t numOutputBuffers = static_cast<uint32_t>(m_Data.m_Outputs.size());
-
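+    // The pre-compiled object is the TosaSerializationHandler created in TosaRefBackend::OptimizeSubgraphView.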
     tosa::TosaSerializationHandler* handler = static_cast<tosa::TosaSerializationHandler*>(m_Data.m_PreCompiledObject);
 
-    std::vector<std::string> input_names = handler->GetInputs();
-    std::vector<std::string> output_names = handler->GetOutputs();
+    std::vector<std::string> inputNames = handler->GetInputs();
+    std::vector<std::string> outputNames = handler->GetOutputs();
 
     TosaReference::IModelRunner runner;
     GraphStatus status;
@@ -42,29 +39,29 @@
     }
 
     // Set the inputs
-    for (uint32_t inputSlotIdx = 0; inputSlotIdx < numInputBuffers; ++inputSlotIdx)
+    for (uint32_t inputSlotIdx = 0; inputSlotIdx < inputNames.size(); ++inputSlotIdx)
     {
         DataType dataType = m_workloadInfo.m_InputTensorInfos[inputSlotIdx].GetDataType();
         switch (dataType)
         {
             case DataType::Float16:
-                SetInput<half_float::half>(runner, input_names[inputSlotIdx], inputSlotIdx);
+                SetInput<half_float::half>(runner, inputNames[inputSlotIdx], inputSlotIdx);
                 break;
             case DataType::Float32:
-                SetInput<float>(runner, input_names[inputSlotIdx], inputSlotIdx);
+                SetInput<float>(runner, inputNames[inputSlotIdx], inputSlotIdx);
                 break;
             case DataType::QAsymmU8:
             case DataType::QAsymmS8:
             case DataType::QSymmS8:
             case DataType::QSymmS16:
             case DataType::Signed32:
-                SetInput<int32_t>(runner, input_names[inputSlotIdx], inputSlotIdx);
+                SetInput<int32_t>(runner, inputNames[inputSlotIdx], inputSlotIdx);
                 break;
             case DataType::Signed64:
-                SetInput<int64_t>(runner, input_names[inputSlotIdx], inputSlotIdx);
+                SetInput<int64_t>(runner, inputNames[inputSlotIdx], inputSlotIdx);
                 break;
             case DataType::Boolean:
-                SetInput<unsigned char>(runner, input_names[inputSlotIdx], inputSlotIdx);
+                SetInput<unsigned char>(runner, inputNames[inputSlotIdx], inputSlotIdx);
                 break;
             default:
                 throw armnn::Exception("Input data type is unsupported in TOSA Reference Backend.");
@@ -79,29 +76,29 @@
     }
 
     // Gets the outputs
-    for (uint32_t outputSlotIdx = 0; outputSlotIdx < numOutputBuffers; ++outputSlotIdx)
+    for (uint32_t outputSlotIdx = 0; outputSlotIdx < outputNames.size(); ++outputSlotIdx)
     {
         DataType dataType = m_workloadInfo.m_OutputTensorInfos[outputSlotIdx].GetDataType();
         switch (dataType)
         {
             case DataType::Float16:
-                GetOutput<half_float::half>(runner, output_names[outputSlotIdx], outputSlotIdx);
+                GetOutput<half_float::half>(runner, outputNames[outputSlotIdx], outputSlotIdx);
                 break;
             case DataType::Float32:
-                GetOutput<float>(runner, output_names[outputSlotIdx], outputSlotIdx);
+                GetOutput<float>(runner, outputNames[outputSlotIdx], outputSlotIdx);
                 break;
             case DataType::QAsymmU8:
             case DataType::QAsymmS8:
             case DataType::QSymmS8:
             case DataType::QSymmS16:
             case DataType::Signed32:
-                GetOutput<int32_t>(runner, output_names[outputSlotIdx], outputSlotIdx);
+                GetOutput<int32_t>(runner, outputNames[outputSlotIdx], outputSlotIdx);
                 break;
             case DataType::Signed64:
-                GetOutput<int64_t>(runner, output_names[outputSlotIdx], outputSlotIdx);
+                GetOutput<int64_t>(runner, outputNames[outputSlotIdx], outputSlotIdx);
                 break;
             case DataType::Boolean:
-                GetOutput<unsigned char>(runner, output_names[outputSlotIdx], outputSlotIdx);
+                GetOutput<unsigned char>(runner, outputNames[outputSlotIdx], outputSlotIdx);
                 break;
             default:
                 throw armnn::Exception("Output data type is unsupported in TOSA Reference Backend.");