IVGCVSW-7174 Add Reshape support to TOSA Reference Backend

 * Spelling corrections and code refactors added to TosaCommon
 * TosaDTypeToString() implemented and used in TosaRef IsLayerSupported()
   instead of enum integer.
 * Using namespace armnn in TosaCommon OneToOneMappingTests and
   TosaReference TosaRefLayerSupportTests instead of armnn::ClassName.
 * Updated VerifyTosaAttribute() to also verify certain attributes
   from input and output shapes.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I71dfca404d081a665f748ab724153c6dc36b7eca
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index d833caa..881e4d6 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -49,6 +49,7 @@
     QuantizedLstmEndToEndTestImpl.cpp
     QuantizedLstmEndToEndTestImpl.hpp
     RankEndToEndTestImpl.hpp
+    ReshapeEndToEndTestImpl.hpp
     ResizeEndToEndTestImpl.hpp
     RuntimeTestImpl.hpp
     SpaceToDepthEndToEndTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/ReshapeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ReshapeEndToEndTestImpl.hpp
new file mode 100644
index 0000000..4cefb6d
--- /dev/null
+++ b/src/backends/backendsCommon/test/ReshapeEndToEndTestImpl.hpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/INetwork.hpp>
+
+#include <CommonTestUtils.hpp>
+#include <ResolveType.hpp>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template<armnn::DataType DataType>
+armnn::INetworkPtr CreateReshapeNetwork(const armnn::TensorShape& inputShape,
+                                        const armnn::TensorShape& outputShape,
+                                        const armnn::ReshapeDescriptor& descriptor,
+                                        const float qScale = 1.0f,
+                                        const int32_t qOffset = 0)
+{
+    using namespace armnn;
+
+    INetworkPtr network(INetwork::Create());
+
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+
+    // Build the graph: input -> reshape -> output.
+    IConnectableLayer* reshape = network->AddReshapeLayer(descriptor, "reshape");
+    IConnectableLayer* input   = network->AddInputLayer(0, "input");
+    IConnectableLayer* output  = network->AddOutputLayer(0, "output");
+
+    Connect(input, reshape, inputTensorInfo, 0, 0);
+    Connect(reshape, output, outputTensorInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ReshapeEndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+
+    const TensorShape& inputShape = { 2, 3 };
+    const TensorShape& outputShape = { 6 };
+
+    ReshapeDescriptor descriptor;
+    descriptor.m_TargetShape = outputShape;
+
+    INetworkPtr network = CreateReshapeNetwork<ArmnnType>(inputShape, outputShape, descriptor);
+
+    CHECK(network);
+
+    std::vector<T> data{ 1, 2, 3,
+                         4, 5, 6 };
+
+    std::map<int, std::vector<T>> inputTensorData = { { 0, data } };
+    std::map<int, std::vector<T>> expectedOutputData = { { 0, data } };
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType>
+void ReshapeEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+    using namespace half_float::literal;
+    using Half = half_float::half;
+
+    const TensorShape& inputShape = { 2, 3 };
+    const TensorShape& outputShape = { 6 };
+
+    ReshapeDescriptor descriptor;
+    descriptor.m_TargetShape = outputShape;
+
+    INetworkPtr network = CreateReshapeNetwork<ArmnnType>(inputShape, outputShape, descriptor);
+    CHECK(network);
+
+    std::vector<Half> data{ 1._h, 2._h, 3._h,
+                            4._h, 5._h, 6._h };
+
+    std::map<int, std::vector<Half>> inputTensorData = { { 0, data } };
+    std::map<int, std::vector<Half>> expectedOutputData = { { 0, data } };
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 00ba429..318735d 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -57,6 +57,11 @@
                 return ConvertPooling2DToTosaOperator(layer, inputs, outputs, poolDesc);
             }
         }
+        case LayerType::Reshape:
+        {
+            auto reshapeDesc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
+            return ConvertReshapeToTosaOperator(layer, inputs, outputs, reshapeDesc);
+        }
         default:
         {
             return CreateEmptyTosaSerializationBasicBlock();
diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
index 66ca869..f1fb34c 100644
--- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
@@ -50,7 +50,7 @@
     auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
 
     // operatorInputNames/operatorOutputNames ends up being the same as
-    // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {op}, // operators
                                            {inputTensor0, inputTensor1, outputTensor0}, // tensors
diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
index 2601a62..7e7631d 100644
--- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
@@ -101,7 +101,7 @@
     auto* outputTensor       = new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {});
 
     // operatorInputNames/operatorOutputNames ends up being the same as
-    // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {opPad, opPool}, // operators
                                            {inputTensor, intermediateTensor, outputTensor}, // tensors
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index b256edd..7733d01 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -14,6 +14,8 @@
         Conv2dOperator.cpp
         Pooling2DOperator.hpp
         Pooling2DOperator.cpp
+        ReshapeOperator.hpp
+        ReshapeOperator.cpp
         TosaOperatorUtils.hpp
     )
 
diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
index eaeb8a4..265901e 100644
--- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
@@ -56,7 +56,7 @@
     auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
 
     // operatorInputNames/operatorOutputNames ends up being the same as
-    // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
     return new TosaSerializationBasicBlock(blockName, // name
                                            {op}, // operators
                                            {inputTensor0, outputTensor0}, // tensors
diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
new file mode 100644
index 0000000..b88a6ef
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ReshapeOperator.hpp"
+
+TosaSerializationBasicBlock* ConvertReshapeToTosaOperator(const Layer* layer,
+                                                          const std::vector<const TensorInfo*>& inputs,
+                                                          const std::vector<const TensorInfo*>& outputs,
+                                                          const ReshapeDescriptor* reshapeDescriptor)
+{
+    std::string inputName = std::string("input0_");
+    std::string outputName = std::string("output0_");
+    std::string blockName  = std::string("Op_RESHAPE_block_") + GetUniqueTosaMappingID();
+
+    // If a layer is present then the block will be used for execution, so input and output names need to be determined
+    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+    if(layer != nullptr)
+    {
+        // Get the layers connected to the input slots and determine unique layer names.
+        Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+        inputName = GenerateUniqueName(connectedLayer, 0);
+
+        // Get the layer connected to the output slot and determine unique layer name.
+        Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();
+        outputName = GenerateUniqueName(connectedOutputLayer, 0);
+    }
+
+    TosaReshapeAttribute attribute(GetTosaTensorShape(reshapeDescriptor->m_TargetShape));
+
+    auto* op = new TosaSerializationOperator(Op_RESHAPE,
+                                             Attribute_ReshapeAttribute,
+                                             &attribute,
+                                             {inputName},
+                                             {outputName});
+
+    std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
+    DType inputDType = ArmNNToDType(inputs[0]->GetDataType());
+
+    std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
+    DType outputDType = ArmNNToDType(outputs[0]->GetDataType());
+
+    auto* inputTensor  = new TosaSerializationTensor(inputName, inputShape, inputDType, {});
+    auto* outputTensor = new TosaSerializationTensor(outputName, outputShape, outputDType, {});
+
+    // operatorInputNames/operatorOutputNames ends up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
+    return new TosaSerializationBasicBlock(blockName, // name
+                                           {op}, // operators
+                                           {inputTensor, outputTensor}, // tensors
+                                           {inputName}, // inputs
+                                           {outputName}); // outputs
+}
diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.hpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.hpp
new file mode 100644
index 0000000..4f363df
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TosaOperatorUtils.hpp"
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertReshapeToTosaOperator(const Layer* layer,
+                                                          const std::vector<const TensorInfo*>& inputs,
+                                                          const std::vector<const TensorInfo*>& outputs,
+                                                          const ReshapeDescriptor* reshapeDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index 513db0c..0711095 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -9,4 +9,5 @@
 #include "ConstantOperator.hpp"
 #include "Conv2dOperator.hpp"
 #include "AvgPool2DIgnoreValueOperator.hpp"
-#include "Pooling2DOperator.hpp"
\ No newline at end of file
+#include "Pooling2DOperator.hpp"
+#include "ReshapeOperator.hpp"
diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
index 176e4e1..288966b 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp
@@ -59,28 +59,20 @@
 // Function that generates unique name using the layer type, input slot and layer guid.
 inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
 {
-    std::string name;
     std::string guid        = std::to_string(layer.GetGuid());
     std::string slotAndGuid = std::to_string(layerSlot) + "_" + guid;
-    LayerType layerType = layer.GetType();
 
-    if (layerType == LayerType::Input)
+    switch (layer.GetType())
     {
-        name = "input" + slotAndGuid;
+        case LayerType::Input:
+            return "input" + slotAndGuid;
+        case LayerType::Output:
+            return "output" + slotAndGuid;
+        case LayerType::Constant:
+            return "constant_" + guid;
+        default:
+            return "intermediate" + slotAndGuid;
     }
-    else if (layerType == LayerType::Output)
-    {
-        name = "output" + slotAndGuid;
-    }
-    else if (layerType == LayerType::Constant)
-    {
-        name = "constant_" + guid;
-    }
-    else
-    {
-        name = "intermediate" + slotAndGuid;
-    }
-    return name;
 }
 
 // Function to return unique int as a string to ensure uniqueness between all input, output and block names.
@@ -90,6 +82,37 @@
     return std::to_string(++uniqueTosaMappingID);
 }
 
+// Function to return Tosa DType as string.
+inline std::string TosaDTypeToString(DType tosaDType)
+{
+    switch (tosaDType)
+    {
+        case DType_UNKNOWN:
+            return "DType_UNKNOWN";
+        case DType_BOOL:
+            return "DType_BOOL";
+        case DType_UINT8:
+            return "DType_UINT8";
+        case DType_INT4:
+            return "DType_INT4";
+        case DType_INT8:
+            return "DType_INT8";
+        case DType_INT16:
+            return "DType_INT16";
+        case DType_INT32:
+            return "DType_INT32";
+        case DType_INT48:
+            return "DType_INT48";
+        case DType_FP32:
+            return "DType_FP32";
+        case DType_UINT16:
+            return "DType_UINT16";
+        case DType_FP16:
+            return "DType_FP16";
+    }
+    return "";
+}
+
 // Function to return Tosa Op as string.
 inline std::string TosaOpToString(Op tosaOp)
 {
diff --git a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
index a38f66b..6f57c4a 100644
--- a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
+++ b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp
@@ -68,9 +68,11 @@
     CHECK(padOp->GetAttributeType() == Attribute_PadAttribute);
     CHECK(padOp->GetOp() == Op_PAD);
 
-    VerifyTosaAttributeFromDescriptor(descriptor,
-                                      padOp->GetAttribute(),
-                                      LayerType::Pooling2d);
+    VerifyTosaAttribute(descriptor,
+                        padOp->GetAttribute(),
+                        inputShape[0],
+                        outputShape[0],
+                        LayerType::Pooling2d);
 
     //
     // Verify average pool operator second.
@@ -115,9 +117,11 @@
     CHECK(poolOp->GetAttributeType() == Attribute_PoolAttribute);
     CHECK(poolOp->GetOp() == Op_AVG_POOL2D);
 
-    VerifyTosaAttributeFromDescriptor(descriptor,
-                                      poolOp->GetAttribute(),
-                                      LayerType::Pooling2d,
-                                      1);
+    VerifyTosaAttribute(descriptor,
+                        poolOp->GetAttribute(),
+                        inputShape[0],
+                        outputShape[0],
+                        LayerType::Pooling2d,
+                        1);
 
 }
\ No newline at end of file
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index af9f9e2..b1fa684 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -79,7 +79,7 @@
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 2, 4, 2 }};
 
     std::vector<float> data = GenerateRandomData<float>(info.GetNumElements());
-    armnn::ConstTensor constTensor(info, data);
+    ConstTensor constTensor(info, data);
 
     IConnectableLayer* constant = net->AddConstantLayer(constTensor, "constant");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
@@ -95,7 +95,7 @@
 
 TEST_CASE("GetTosaMapping_Conv2dLayer")
 {
-    armnn::Convolution2dDescriptor descriptor;
+    Convolution2dDescriptor descriptor;
     descriptor.m_PadLeft     = 1;
     descriptor.m_PadRight    = 1;
     descriptor.m_PadTop      = 1;
@@ -106,10 +106,10 @@
     descriptor.m_DilationY   = 2;
     descriptor.m_BiasEnabled = true;
 
-    const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
-    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
-    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+    const TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+    const TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    const TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+    const TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
 
     std::vector<std::vector<int32_t>> inputShape  = {{ 1, 5, 5, 1 }, { 1, 3, 3, 1 }, { 1 }};
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 3, 3, 1 }};
@@ -131,7 +131,7 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    armnn::Convolution2dDescriptor descriptor;
+    Convolution2dDescriptor descriptor;
     descriptor.m_PadLeft     = 1;
     descriptor.m_PadRight    = 1;
     descriptor.m_PadTop      = 1;
@@ -142,25 +142,25 @@
     descriptor.m_DilationY   = 2;
     descriptor.m_BiasEnabled = true;
 
-    const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
-    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
-    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+    const TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+    const TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    const TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+    const TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
 
     std::vector<std::vector<int32_t>> inputShape  = {{ 1, 5, 5, 1 }};
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 3, 3, 1 }};
 
     std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
-    armnn::ConstTensor weights(weightsInfo, weightsData);
+    ConstTensor weights(weightsInfo, weightsData);
 
     std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
-    armnn::ConstTensor biases(biasesInfo, biasesData);
+    ConstTensor biases(biasesInfo, biasesData);
 
-    armnn::IConnectableLayer* const inputLayer  = net->AddInputLayer(0, "input0");
-    armnn::IConnectableLayer* const weightsLayer = net->AddConstantLayer(weights, "weights");
-    armnn::IConnectableLayer* const biasesLayer = net->AddConstantLayer(biases, "biases");
-    armnn::IConnectableLayer* const convLayer   = net->AddConvolution2dLayer(descriptor, "conv2d");
-    armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0);
+    IConnectableLayer* const inputLayer  = net->AddInputLayer(0, "input0");
+    IConnectableLayer* const weightsLayer = net->AddConstantLayer(weights, "weights");
+    IConnectableLayer* const biasesLayer = net->AddConstantLayer(biases, "biases");
+    IConnectableLayer* const convLayer   = net->AddConvolution2dLayer(descriptor, "conv2d");
+    IConnectableLayer* const outputLayer = net->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
@@ -179,18 +179,18 @@
 
 TEST_CASE("GetTosaMapping_MaxPool2DLayer")
 {
-    armnn::Pooling2dDescriptor descriptor;
-    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = PoolingAlgorithm::Max;
     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
     descriptor.m_StrideX = descriptor.m_StrideY = 2;
     descriptor.m_PadLeft = 1;
     descriptor.m_PadRight = 1;
     descriptor.m_PadTop = 1;
     descriptor.m_PadBottom = 1;
-    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+    descriptor.m_PaddingMethod = PaddingMethod::Exclude;
 
-    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
-    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
+    TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
+    TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
 
     std::vector<std::vector<int32_t>> inputShape  = {{ 1, 1, 4, 4 }};
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 1, 3, 3 }};
@@ -209,30 +209,30 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    armnn::Pooling2dDescriptor descriptor;
-    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = PoolingAlgorithm::Max;
     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
     descriptor.m_StrideX = descriptor.m_StrideY = 2;
     descriptor.m_PadLeft = 1;
     descriptor.m_PadRight = 1;
     descriptor.m_PadTop = 1;
     descriptor.m_PadBottom = 1;
-    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+    descriptor.m_PaddingMethod = PaddingMethod::Exclude;
 
-    IConnectableLayer* input0  = net->AddInputLayer(0, "input0");
-    IConnectableLayer* pool    = net->AddPooling2dLayer(descriptor, "pool");
-    IConnectableLayer* output  = net->AddOutputLayer(0, "output");
+    IConnectableLayer* input  = net->AddInputLayer(0, "input0");
+    IConnectableLayer* pool   = net->AddPooling2dLayer(descriptor, "pool");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
-    input0->GetOutputSlot(0).Connect(pool->GetInputSlot(0));
+    input->GetOutputSlot(0).Connect(pool->GetInputSlot(0));
     pool->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
-    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
+    TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
+    TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
 
     std::vector<std::vector<int32_t>> inputShape  = {{ 1, 1, 4, 4 }};
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 1, 3, 3 }};
 
-    input0->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
     pool->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     TosaSerializationBasicBlock* basicBlock =
@@ -243,18 +243,18 @@
 
 TEST_CASE("GetTosaMapping_AvgPool2DLayer")
 {
-    armnn::Pooling2dDescriptor descriptor;
-    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = PoolingAlgorithm::Average;
     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
     descriptor.m_StrideX = descriptor.m_StrideY = 2;
     descriptor.m_PadLeft = 1;
     descriptor.m_PadRight = 1;
     descriptor.m_PadTop = 1;
     descriptor.m_PadBottom = 1;
-    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+    descriptor.m_PaddingMethod = PaddingMethod::Exclude;
 
-    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
-    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
+    TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
+    TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
 
     std::vector<std::vector<int32_t>> inputShape  = {{ 1, 1, 4, 4 }};
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 1, 3, 3 }};
@@ -278,15 +278,15 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    armnn::Pooling2dDescriptor descriptor;
-    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = PoolingAlgorithm::Average;
     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
     descriptor.m_StrideX = descriptor.m_StrideY = 2;
     descriptor.m_PadLeft = 1;
     descriptor.m_PadRight = 1;
     descriptor.m_PadTop = 1;
     descriptor.m_PadBottom = 1;
-    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+    descriptor.m_PaddingMethod = PaddingMethod::Exclude;
 
     IConnectableLayer* input0  = net->AddInputLayer(0, "input0");
     IConnectableLayer* pool    = net->AddPooling2dLayer(descriptor, "pool");
@@ -295,8 +295,8 @@
     input0->GetOutputSlot(0).Connect(pool->GetInputSlot(0));
     pool->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
-    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
+    TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32);
+    TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32);
 
     std::vector<std::vector<int32_t>> inputShape  = {{ 1, 1, 4, 4 }};
     std::vector<std::vector<int32_t>> outputShape = {{ 1, 1, 3, 3 }};
@@ -315,6 +315,66 @@
                                         LayerType::Pooling2d);
 }
 
+TEST_CASE("GetTosaMapping_ReshapeLayer")
+{
+    TensorInfo inputInfo = TensorInfo({ 2, 3 }, DataType::Float32);
+    TensorInfo outputInfo = TensorInfo({ 6 }, DataType::Float32);
+
+    std::vector<std::vector<int32_t>> inputShape  = {{ 2, 3 }};
+    std::vector<std::vector<int32_t>> outputShape = {{ 6 }};
+
+    ReshapeDescriptor descriptor;
+    descriptor.m_TargetShape = { 6 };
+
+    TosaSerializationBasicBlock* basicBlock =
+        GetTosaMapping(nullptr, LayerType::Reshape, {&inputInfo}, {&outputInfo}, descriptor);
+    AssertTosaOneToOneMappingBasicBlock(basicBlock,
+                                        inputShape,
+                                        outputShape,
+                                        Op_RESHAPE,
+                                        Attribute_ReshapeAttribute,
+                                        descriptor,
+                                        LayerType::Reshape);
+}
+
+TEST_CASE("GetTosaMappingFromLayer_ReshapeLayer")
+{
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    ReshapeDescriptor descriptor;
+    descriptor.m_TargetShape = { 6 };
+
+    IConnectableLayer* input   = net->AddInputLayer(0, "input");
+    IConnectableLayer* reshape = net->AddReshapeLayer(descriptor, "reshape");
+    IConnectableLayer* output  = net->AddOutputLayer(0, "output");
+
+    input->GetOutputSlot(0).Connect(reshape->GetInputSlot(0));
+    reshape->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    TensorInfo inputInfo = TensorInfo({ 2, 3 }, DataType::Float32);
+    TensorInfo outputInfo = TensorInfo({ 6 }, DataType::Float32);
+
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    reshape->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    std::vector<std::vector<int32_t>> inputShape  = {{ 2, 3 }};
+    std::vector<std::vector<int32_t>> outputShape = {{ 6 }};
+
+    TosaSerializationBasicBlock* basicBlock =
+        GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(reshape));
+    AssertTosaOneToOneMappingBasicBlock(basicBlock,
+                                        inputShape,
+                                        outputShape,
+                                        Op_RESHAPE,
+                                        Attribute_ReshapeAttribute,
+                                        descriptor,
+                                        LayerType::Reshape);
+}
+
 TEST_CASE("GetTosaMapping_Unimplemented")
 {
     TosaSerializationBasicBlock* basicBlock =
diff --git a/src/backends/tosaCommon/test/TosaTestUtils.hpp b/src/backends/tosaCommon/test/TosaTestUtils.hpp
index dd63c0e..5c10a6d 100644
--- a/src/backends/tosaCommon/test/TosaTestUtils.hpp
+++ b/src/backends/tosaCommon/test/TosaTestUtils.hpp
@@ -8,16 +8,20 @@
 #include <Layer.hpp>
 
 #include <tosaCommon/TosaMappings.hpp>
+#include <tosaCommon/operatorMappings/TosaOperatorUtils.hpp>
 
 #include <doctest/doctest.h>
+#include <numeric>
 
 using namespace armnn;
 using namespace tosa;
 
-inline void VerifyTosaAttributeFromDescriptor(const BaseDescriptor& descriptor,
-                                              const TosaAttributeBase* attribute,
-                                              LayerType type,
-                                              uint32_t mappingOpNumber = 0)
+inline void VerifyTosaAttribute(const BaseDescriptor& descriptor,
+                                const TosaAttributeBase* attribute,
+                                std::vector<int32_t> inputShape,
+                                std::vector<int32_t> outputShape,
+                                LayerType type,
+                                uint32_t mappingOpNumber = 0)
 {
     switch (type)
     {
@@ -100,6 +104,25 @@
             CHECK(stride == poolAttribute.stride());
             break;
         }
+        case LayerType::Reshape:
+        {
+            auto reshapeDesc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
+            TosaReshapeAttribute reshapeAttribute(attribute);
+            std::vector<int32_t> shapeAttrib = reshapeAttribute.new_shape();
+
+            CHECK(GetTosaTensorShape(reshapeDesc->m_TargetShape) == shapeAttrib);
+            CHECK(outputShape == shapeAttrib);
+
+            // A valid TOSA reshape must preserve the total number of
+            // elements: the product of the input dimensions has to match
+            // the product of the attribute's target-shape dimensions.
+            auto numInputElements = std::accumulate(std::begin(inputShape), std::end(inputShape),
+                                                    1, std::multiplies<int32_t>());
+            auto numAttributeShapeElements = std::accumulate(std::begin(shapeAttrib), std::end(shapeAttrib),
+                                                             1, std::multiplies<int32_t>());
+            CHECK(numInputElements == numAttributeShapeElements);
+            break;
+        }
         default:
             break;
     }
@@ -195,7 +218,22 @@
         }
     }
 
-    VerifyTosaAttributeFromDescriptor(descriptor,
-                                      op->GetAttribute(),
-                                      type);
+    std::vector<int32_t> input = {};
+    std::vector<int32_t> output = {};
+
+    if (!inputShape.empty())
+    {
+        input = inputShape[0];
+    }
+
+    if (!outputShape.empty())
+    {
+        output = outputShape[0];
+    }
+
+    VerifyTosaAttribute(descriptor,
+                        op->GetAttribute(),
+                        input,
+                        output,
+                        type);
 }
\ No newline at end of file
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index 848b7ef..5cda85a 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -36,7 +36,7 @@
 
     for (auto input : inputs)
     {
-        std::string dataTypeCode = std::to_string(input->GetDtype());
+        std::string dataTypeCode = TosaDTypeToString(input->GetDtype());
 
         // Check Dtype from tensor (GetDtype)
         supported &= CheckSupportRule(TosaTypeAnyOf(input, supportedTypes),
@@ -54,7 +54,7 @@
 
     for (auto output : outputs)
     {
-        std::string dataTypeCode = std::to_string(output->GetDtype());
+        std::string dataTypeCode = TosaDTypeToString(output->GetDtype());
 
         // Check Dtype from tensor (GetDtype)
         supported &= CheckSupportRule(TosaTypeAnyOf(output, supportedTypes),
@@ -97,8 +97,8 @@
     {
         auto input = inputs[i];
         auto output = outputs[i];
-        std::string inputDataTypeCode = std::to_string(input->GetDtype());
-        std::string outputDataTypeCode = std::to_string(output->GetDtype());
+        std::string inputDataTypeCode = TosaDTypeToString(input->GetDtype());
+        std::string outputDataTypeCode = TosaDTypeToString(output->GetDtype());
         std::tuple<DType, DType> mappingType(input->GetDtype(), output->GetDtype());
 
         // Check Dtype from tensor (GetDtype)
@@ -285,6 +285,24 @@
             return RunTosaLayerChecksSingleDataType(
                     op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported);
         }
+        case tosa::Op_RESHAPE:
+        {
+            std::vector<Attribute> supportedAttributes = { Attribute_ReshapeAttribute };
+
+            std::vector<DType> supportedTypes =
+            {
+                DType_FP16,
+                DType_FP32,
+                DType_INT8,
+                DType_INT16,
+                DType_INT32,
+                DType_BOOL
+            };
+
+            // Check the attribute, data types and bounds for inputs and outputs.
+            return RunTosaLayerChecksSingleDataType(
+                op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported);
+        }
         default:
             SetValueChecked(reasonIfUnsupported, "Operation is currently unsupported by the TOSA Reference Backend.");
             return false;
@@ -332,6 +350,7 @@
             break;
         }
         case LayerType::Pooling2d:
+        case LayerType::Reshape:
             // Setup inputs and outputs
             inputInfos.push_back(&infos[0]);
             outputInfos.push_back(&infos[1]);
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index 4245f0d..aaf8a67 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -8,6 +8,7 @@
 #include "backendsCommon/test/AdditionEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
+#include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
 
 #include <doctest/doctest.h>
 
@@ -74,4 +75,20 @@
     AvgPool2dEndToEnd<DataType::Float32>(tosaDefaultBackends, PaddingMethod::IgnoreValue);
 }
 
+// Reshape
+TEST_CASE("TosaRefReshapeEndtoEndTestFloat32")
+{
+    ReshapeEndToEnd<DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefReshapeEndtoEndTestInt32")
+{
+    ReshapeEndToEnd<DataType::Signed32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefReshapeEndtoEndTestFloat16")
+{
+    ReshapeEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
+}
+
 }
\ No newline at end of file
diff --git a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
index e6fbbf9..86b01d8 100644
--- a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
@@ -12,26 +12,28 @@
 
 #include <string>
 
+using namespace armnn;
+
 TEST_SUITE("TosaRefLayerSupported")
 {
 
 TEST_CASE("IsLayerSupportedTosaReferenceAddition")
 {
-    armnn::TensorShape shape0 = {1,1,3,4};
-    armnn::TensorShape shape1 = {4};
-    armnn::TensorShape outShape = {1,1,3,4};
-    armnn::TensorInfo in0(shape0, armnn::DataType::Float32);
-    armnn::TensorInfo in1(shape1, armnn::DataType::Float32);
-    armnn::TensorInfo out(outShape, armnn::DataType::Float32);
+    TensorShape shape0 = {1,1,3,4};
+    TensorShape shape1 = {4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in0(shape0, DataType::Float32);
+    TensorInfo in1(shape1, DataType::Float32);
+    TensorInfo out(outShape, DataType::Float32);
 
-    armnn::BaseDescriptor desc;
-    armnn::TosaRefLayerSupport supportChecker;
+    BaseDescriptor desc;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Addition,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Addition,
                                                      {in0, in1, out},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(supported);
@@ -39,21 +41,21 @@
 
 TEST_CASE("IsLayerSupportedTosaReferenceAdditionUnsupported")
 {
-    armnn::TensorShape shape0 = {1,1,3,4};
-    armnn::TensorShape shape1 = {4};
-    armnn::TensorShape outShape = {1,1,3,4};
-    armnn::TensorInfo in0(shape0, armnn::DataType::Signed64);
-    armnn::TensorInfo in1(shape1, armnn::DataType::Signed64);
-    armnn::TensorInfo out(outShape, armnn::DataType::Signed64);
+    TensorShape shape0 = {1,1,3,4};
+    TensorShape shape1 = {4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in0(shape0, DataType::Signed64);
+    TensorInfo in1(shape1, DataType::Signed64);
+    TensorInfo out(outShape, DataType::Signed64);
 
-    armnn::BaseDescriptor desc;
-    armnn::TosaRefLayerSupport supportChecker;
+    BaseDescriptor desc;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Addition,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Addition,
                                                      {in0, in1, out},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(!supported);
@@ -63,19 +65,21 @@
         "TOSA Reference Operator: Op_ADD for input: input1_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
         "TOSA Reference Operator: Op_ADD for output: output0_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+        "has an unsupported data type: DType_UNKNOWN") != std::string::npos);
 }
 
 TEST_CASE("IsLayerSupportedTosaReferenceConstant")
 {
-    armnn::TensorInfo outputInfo({1,1,3,4}, armnn::DataType::Float32);
+    TensorInfo outputInfo({1,1,3,4}, DataType::Float32);
 
-    armnn::TosaRefLayerSupport supportChecker;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Constant,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Constant,
                                                      {outputInfo},
-                                                     armnn::BaseDescriptor(),
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     BaseDescriptor(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(supported);
@@ -83,39 +87,41 @@
 
 TEST_CASE("IsLayerSupportedTosaReferenceConstantUnsupported")
 {
-    armnn::TensorInfo outputInfo({1,1,3,4}, armnn::DataType::Signed64);
+    TensorInfo outputInfo({1,1,3,4}, DataType::Signed64);
 
-    armnn::TosaRefLayerSupport supportChecker;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Constant,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Constant,
                                                      {outputInfo},
-                                                     armnn::BaseDescriptor(),
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     BaseDescriptor(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(!supported);
     REQUIRE(reasonIfNotSupported.find(
             "TOSA Reference Operator: Op_CONST for output: constant_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+        "has an unsupported data type: DType_UNKNOWN") != std::string::npos);
 }
 
 TEST_CASE("IsLayerSupportedTosaReferenceConv2d")
 {
-    armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
-    armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
-    armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
-    armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+    TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo biasesInfo ({ 1 }, DataType::Float32);
 
-    armnn::Convolution2dDescriptor desc;
+    Convolution2dDescriptor desc;
     desc.m_BiasEnabled = true;
 
-    armnn::TosaRefLayerSupport supportChecker;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
     auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d,
                                                      {inputInfo, outputInfo, weightsInfo, biasesInfo},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(supported);
@@ -124,21 +130,21 @@
 TEST_CASE("IsLayerSupportedTosaReferenceConv2dUnsupported")
 {
     // If inputs and weights are Fp32, output must match.
-    armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
-    armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Signed64);
-    armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
-    armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+    TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Signed64);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+    TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
 
-    armnn::Convolution2dDescriptor desc;
+    Convolution2dDescriptor desc;
     desc.m_BiasEnabled = true;
 
-    armnn::TosaRefLayerSupport supportChecker;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
     auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d,
                                                      {inputInfo, outputInfo, weightsInfo, biasesInfo},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(!supported);
@@ -154,19 +160,19 @@
 
 TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2d")
 {
-    armnn::TensorShape inShape = {1,1,3,4};
-    armnn::TensorShape outShape = {1,1,3,4};
-    armnn::TensorInfo in(inShape, armnn::DataType::Float32);
-    armnn::TensorInfo out(outShape, armnn::DataType::Float32);
+    TensorShape inShape = {1,1,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in(inShape, DataType::Float32);
+    TensorInfo out(outShape, DataType::Float32);
 
-    armnn::Pooling2dDescriptor desc;
-    armnn::TosaRefLayerSupport supportChecker;
+    Pooling2dDescriptor desc;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d,
                                                      {in, out},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(supported);
@@ -174,22 +180,22 @@
 
 TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2d_IgnoreValue")
 {
-    armnn::TensorShape inShape = {1,1,3,4};
-    armnn::TensorShape outShape = {1,1,3,4};
-    armnn::TensorInfo in(inShape, armnn::DataType::Float32);
-    armnn::TensorInfo out(outShape, armnn::DataType::Float32);
+    TensorShape inShape = {1,1,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in(inShape, DataType::Float32);
+    TensorInfo out(outShape, DataType::Float32);
 
-    armnn::Pooling2dDescriptor desc;
-    desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-    desc.m_PoolType = armnn::PoolingAlgorithm::Average;
+    Pooling2dDescriptor desc;
+    desc.m_PaddingMethod = PaddingMethod::IgnoreValue;
+    desc.m_PoolType = PoolingAlgorithm::Average;
 
-    armnn::TosaRefLayerSupport supportChecker;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d,
                                                      {in, out},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(supported);
@@ -197,22 +203,22 @@
 
 TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2d_InputOutputDatatypeDifferent")
 {
-    armnn::TensorShape inShape = {1,1,3,4};
-    armnn::TensorShape outShape = {1,1,3,4};
-    armnn::TensorInfo in(inShape, armnn::DataType::QAsymmS8);
-    armnn::TensorInfo out(outShape, armnn::DataType::Signed32);
+    TensorShape inShape = {1,1,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in(inShape, DataType::QAsymmS8);
+    TensorInfo out(outShape, DataType::Signed32);
 
-    armnn::Pooling2dDescriptor desc;
-    desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-    desc.m_PoolType = armnn::PoolingAlgorithm::Average;
+    Pooling2dDescriptor desc;
+    desc.m_PaddingMethod = PaddingMethod::IgnoreValue;
+    desc.m_PoolType = PoolingAlgorithm::Average;
 
-    armnn::TosaRefLayerSupport supportChecker;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d,
                                                      {in, out},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(supported);
@@ -220,19 +226,19 @@
 
 TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2dUnsupported")
 {
-    armnn::TensorShape inShape = {1,1,3,4};
-    armnn::TensorShape outShape = {1,1,3,4};
-    armnn::TensorInfo in(inShape, armnn::DataType::Signed64);
-    armnn::TensorInfo out(outShape, armnn::DataType::Signed64);
+    TensorShape inShape = {1,1,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in(inShape, DataType::Signed64);
+    TensorInfo out(outShape, DataType::Signed64);
 
-    armnn::Pooling2dDescriptor desc;
-    armnn::TosaRefLayerSupport supportChecker;
+    Pooling2dDescriptor desc;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d,
                                                      {in, out},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(!supported);
@@ -240,26 +246,28 @@
         "TOSA Reference Operator: Op_MAX_POOL2D for input: input0_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
         "TOSA Reference Operator: Op_MAX_POOL2D for output: output0_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+        "has an unsupported data type: DType_UNKNOWN") != std::string::npos);
 }
 
 TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2dUnsupported_InputOutputDatatypeDifferent")
 {
-    armnn::TensorShape inShape = {1,1,3,4};
-    armnn::TensorShape outShape = {1,1,3,4};
-    armnn::TensorInfo in(inShape, armnn::DataType::Float32);
-    armnn::TensorInfo out(outShape, armnn::DataType::Float16);
+    TensorShape inShape = {1,1,3,4};
+    TensorShape outShape = {1,1,3,4};
+    TensorInfo in(inShape, DataType::Float32);
+    TensorInfo out(outShape, DataType::Float16);
 
-    armnn::Pooling2dDescriptor desc;
-    desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
-    desc.m_PoolType = armnn::PoolingAlgorithm::Average;
+    Pooling2dDescriptor desc;
+    desc.m_PaddingMethod = PaddingMethod::IgnoreValue;
+    desc.m_PoolType = PoolingAlgorithm::Average;
 
-    armnn::TosaRefLayerSupport supportChecker;
+    TosaRefLayerSupport supportChecker;
     std::string reasonIfNotSupported;
-    auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d,
+    auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d,
                                                      {in, out},
                                                      desc,
-                                                     armnn::EmptyOptional(),
-                                                     armnn::EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
                                                      reasonIfNotSupported);
 
     CHECK(!supported);
@@ -268,7 +276,57 @@
     REQUIRE(reasonIfNotSupported.find(
         " and output: output0_") != std::string::npos);
     REQUIRE(reasonIfNotSupported.find(
-        " has an unsupported input data type: 8 to output data type: 10") != std::string::npos);
+        " has an unsupported input data type: DType_FP32 to output data type: DType_FP16") != std::string::npos);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceReshape")
+{
+    TensorShape inShape = {3,4};
+    TensorShape outShape = {12};
+    TensorInfo in(inShape, DataType::Float32);
+    TensorInfo out(outShape, DataType::Float32);
+
+    ReshapeDescriptor desc;
+    desc.m_TargetShape = {12};
+
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Reshape,
+                                                     {in, out},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceReshapeUnsupported")
+{
+    TensorShape inShape = {3,4};
+    TensorShape outShape = {12};
+    TensorInfo in(inShape, DataType::Signed64);
+    TensorInfo out(outShape, DataType::Signed64);
+
+    ReshapeDescriptor desc;
+    desc.m_TargetShape = {12};
+
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Reshape,
+                                                     {in, out},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(!supported);
+    REQUIRE(reasonIfNotSupported.find(
+        "TOSA Reference Operator: Op_RESHAPE for input: input0_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+        "TOSA Reference Operator: Op_RESHAPE for output: output0_") != std::string::npos);
+    REQUIRE(reasonIfNotSupported.find(
+        "has an unsupported data type: DType_UNKNOWN") != std::string::npos);
 }
 
 }