IVGCVSW-7170 Add Concat support to TOSA Reference Backend

* Change comment for the unique tensor names in all tosa common operators

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I247b4b2365d5f0173218c5dfd11fba12d2399959
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index c8d20da..2614523 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -112,7 +112,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -174,7 +174,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType>
@@ -236,7 +236,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -297,7 +297,7 @@
     std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
 
-    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends);
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(net), inputTensorData, expectedOutputData, backends);
 }
 
 } // anonymous namespace
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 7ecf726..1452e4a 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -27,6 +27,11 @@
         {
             return ConvertAdditionToTosaOperator(layer, inputs, outputs);
         }
+        case LayerType::Concat:
+        {
+            auto concatDesc = PolymorphicDowncast<const OriginsDescriptor*>(&descriptor);
+            return ConvertConcatToTosaOperator(layer, inputs, outputs, concatDesc);
+        }
         case LayerType::Constant:
         {
             return ConvertConstantToTosaOperator(layer, outputs);
diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
index 20ba146..7014886 100644
--- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
@@ -18,14 +18,14 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique layer names.
+        // Get the layers connected to the input slots and determine unique tensor names.
         Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
         input0Name = GenerateUniqueName(connectedLayer0, 0);
 
         Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer();
         input1Name = GenerateUniqueName(connectedLayer1, 1);
 
-        // Get the layer connected to the output slot and determine unique layer name.
+        // Determine unique output tensor name.
         outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
index d268c2f..61de0ae 100644
--- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp
@@ -19,11 +19,11 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique layer names.
+        // Get the layers connected to the input slots and determine unique tensor names.
         Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
         padInputName = GenerateUniqueName(connectedInputLayer, 0);
 
-        // Get the layer connected to the output slot and determine unique layer name.
+        // Determine unique output tensor name.
         poolOutputName = GenerateUniqueOutputName(*layer, 0);
     }
 
diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
index 90c1a4f..2443dc0 100644
--- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
+++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt
@@ -8,6 +8,8 @@
         AdditionOperator.cpp
         AvgPool2DIgnoreValueOperator.hpp
         AvgPool2DIgnoreValueOperator.cpp
+        ConcatOperator.hpp
+        ConcatOperator.cpp
         ConstantOperator.hpp
         ConstantOperator.cpp
         Conv2dOperator.hpp
diff --git a/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp b/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
new file mode 100644
index 0000000..8c651be
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ConcatOperator.cpp
@@ -0,0 +1,81 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConcatOperator.hpp"
+
+TosaSerializationBasicBlock* ConvertConcatToTosaOperator(const Layer* layer,
+                                                         const std::vector<const TensorInfo*>& inputs,
+                                                         const std::vector<const TensorInfo*>& outputs,
+                                                         const OriginsDescriptor* concatDescriptor)
+{
+    auto numInputs = inputs.size();
+    std::vector<std::string> inputNames;
+    inputNames.reserve(numInputs);
+    std::string outputName = std::string("output0_");
+    std::string blockName  = std::string("Op_CONCAT_block_") + GetUniqueTosaMappingID();
+
+    // Set input names for validation purposes only.
+    if (layer == nullptr)
+    {
+        for (uint32_t i = 0; i < numInputs; ++i)
+        {
+            inputNames.push_back("input"+ std::to_string(i) +"_");
+        }
+    }
+    // If a layer is present then the block will be used for execution, so input and output names need to be determined
+    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
+    else
+    {
+        // Get the layers connected to the input slots and determine unique tensor names.
+        for (uint32_t i = 0; i < numInputs; ++i)
+        {
+            Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
+
+            std::string inputName = GenerateUniqueName(connectedLayer, i);
+            inputNames.push_back(inputName);
+        }
+
+        // Determine unique output tensor name.
+        outputName = GenerateUniqueOutputName(*layer, 0);
+    }
+
+    auto axis = static_cast<int32_t>(concatDescriptor->GetConcatAxis());
+    TosaAxisAttribute attribute(axis);
+
+    TosaSerializationOperator* op = new TosaSerializationOperator(Op_CONCAT,
+                                                                  Attribute_AxisAttribute,
+                                                                  &attribute,
+                                                                  inputNames,
+                                                                  {outputName});
+
+    std::vector<TosaSerializationTensor*> tensors;
+    tensors.reserve(numInputs);
+
+    for (uint32_t i = 0; i < numInputs; ++i)
+    {
+        // Only add input tensors for validation or when the connected layer is an input layer.
+        // As there can't be duplicate tensors and intermediate or constant tensors are created separately.
+        if(inputNames[i].find("input") != std::string::npos)
+        {
+            std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[i]->GetShape());
+            DType inputDType = ArmNNToDType(inputs[i]->GetDataType());
+            tensors.push_back(new TosaSerializationTensor(inputNames[i], inputShape, inputDType, {}));
+        }
+    }
+
+    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+    TosaSerializationTensor* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {});
+    tensors.push_back(outputTensor0);
+
+    // operatorInputNames/operatorOutputNames ends up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
+    return new TosaSerializationBasicBlock(blockName,     // name
+                                           {op},          // operators
+                                           tensors,       // tensors
+                                           inputNames,    // inputs
+                                           {outputName}); // outputs
+}
\ No newline at end of file
diff --git a/src/backends/tosaCommon/operatorMappings/ConcatOperator.hpp b/src/backends/tosaCommon/operatorMappings/ConcatOperator.hpp
new file mode 100644
index 0000000..e6094ce
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ConcatOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+#include <tosa_serialization_handler.h>
+
+#include "TosaOperatorUtils.hpp"
+
+using namespace armnn;
+using namespace tosa;
+
+TosaSerializationBasicBlock* ConvertConcatToTosaOperator(const Layer* layer,
+                                                         const std::vector<const TensorInfo*>& inputs,
+                                                         const std::vector<const TensorInfo*>& outputs,
+                                                         const OriginsDescriptor* concatDescriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
index 6fc1678..a7af083 100644
--- a/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
@@ -24,21 +24,21 @@
             inputNames.emplace_back("input2_");
         }
     }
+    // If a layer is present then the block will be used for execution, so input and output names need to be
+    // determined using the previous and following layers so the graph is connected correctly.
+    // For validation this doesn't matter.
     else
     {
-        // If a layer is present then the block will be used for execution, so input and output names need to be
-        // determined using the previous and following layers so the graph is connected correctly.
-        // For validation this doesn't matter.
+        // Get the layers connected to the input slots and determine unique tensor names.
         for (uint32_t i = 0; i < inputs.size(); ++i)
         {
-            // Get the layer connected to the input slot and determine unique layer name.
             Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();
 
             std::string inputName = GenerateUniqueName(connectedLayer, i);
             inputNames.push_back(inputName);
         }
 
-        // Get the layer connected to the output slot and determine unique layer name.
+        // Determine unique output tensor name.
         outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
index ee02425..444d99a 100644
--- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp
@@ -21,11 +21,11 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique layer names.
+        // Get the layers connected to the input slots and determine unique tensor names.
         Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
         input0Name = GenerateUniqueName(connectedInputLayer, 0);
 
-        // Get the layer connected to the output slot and determine unique layer name.
+        // Determine unique output tensor name.
         outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
index 3027e2e..10670ec 100644
--- a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp
@@ -18,11 +18,11 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique layer names.
+        // Get the layers connected to the input slots and determine unique tensor names.
         Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
         inputName = GenerateUniqueName(connectedLayer, 0);
 
-        // Get the layer connected to the output slot and determine unique layer name.
+        // Determine unique output tensor name.
         outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
diff --git a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
index 742ba88..b98576f 100644
--- a/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/SliceOperator.cpp
@@ -18,11 +18,11 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
-        // Get the layers connected to the input slots and determine unique layer names.
+        // Get the layers connected to the input slots and determine unique tensor names.
         Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
         inputName = GenerateUniqueName(connectedLayer, 0);
 
-        // Get the layer connected to the output slot and determine unique layer name.
+        // Determine unique output tensor name.
         outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
index 1a9d6be..052c54c 100644
--- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
+++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp
@@ -6,6 +6,7 @@
 #pragma once
 
 #include "AdditionOperator.hpp"
+#include "ConcatOperator.hpp"
 #include "ConstantOperator.hpp"
 #include "Conv2dOperator.hpp"
 #include "AvgPool2DIgnoreValueOperator.hpp"
diff --git a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
index 1ad8c95..c8af5c2 100644
--- a/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/TransposeConv2dOperator.cpp
@@ -22,9 +22,11 @@
     // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
     if(layer != nullptr)
     {
+        // Get the layers connected to the input slots and determine unique tensor names.
         Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
         input0Name = GenerateUniqueName(connectedInputLayer, 0);
 
+        // Determine unique output tensor name.
         outputName = GenerateUniqueOutputName(*layer, 0);
     }
 
diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
index e74c638..b3ab14a 100644
--- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
+++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp
@@ -56,6 +56,82 @@
         basicBlock, inputShape, outputShape, Op_ADD, Attribute_NONE, BaseDescriptor(), LayerType::Addition);
 }
 
+TEST_CASE("GetTosaMapping_ConcatLayer")
+{
+    std::vector<armnn::TensorShape> inputTensorShapes = { { 2, 3, 2, 2 }, { 2, 3, 2, 2 } };
+    armnn::TensorInfo input0Info(inputTensorShapes[0], DataType::Float32);
+    armnn::TensorInfo input1Info(inputTensorShapes[1], DataType::Float32);
+    armnn::TensorInfo outputInfo({ 2, 6, 2, 2 }, DataType::Float32);
+
+    armnn::OriginsDescriptor descriptor;
+    unsigned int concatAxis = 1;
+    descriptor.SetConcatAxis(concatAxis);
+    descriptor = armnn::CreateDescriptorForConcatenation(inputTensorShapes.begin(),
+                                                         inputTensorShapes.end(),
+                                                         concatAxis);
+
+    TosaSerializationBasicBlock* basicBlock =
+            GetTosaMapping(nullptr, LayerType::Concat, {&input0Info,&input1Info}, {&outputInfo}, descriptor);
+
+    std::vector<std::vector<int32_t>> inputShapes = { { 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    std::vector<std::vector<int32_t>> outputShape = { { 2, 6, 2, 2 } };
+
+    AssertTosaOneToOneMappingBasicBlock(basicBlock,
+                                        inputShapes,
+                                        outputShape,
+                                        Op_CONCAT,
+                                        Attribute_AxisAttribute,
+                                        descriptor,
+                                        LayerType::Concat);
+}
+
+TEST_CASE("GetTosaMappingFromLayer_ConcatLayer")
+{
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    armnn::OriginsDescriptor descriptor;
+    unsigned int concatAxis = 1;
+    descriptor.SetConcatAxis(concatAxis);
+    std::vector<armnn::TensorShape> inputTensorShapes = { { 2, 3, 2, 2 }, { 2, 3, 2, 2 } };
+    descriptor = armnn::CreateDescriptorForConcatenation(inputTensorShapes.begin(),
+                                                         inputTensorShapes.end(),
+                                                         concatAxis);
+
+    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
+    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
+    IConnectableLayer* concat = net->AddConcatLayer(descriptor, "concat");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    input0->GetOutputSlot(0).Connect(concat->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(concat->GetInputSlot(1));
+    concat->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+
+    TensorInfo inputInfo0 = TensorInfo(inputTensorShapes[0], DataType::Float32, 0.0f, 0, true);
+    TensorInfo inputInfo1 = TensorInfo(inputTensorShapes[1], DataType::Float32, 0.0f, 0, true);
+    armnn::TensorInfo outputInfo({ 2, 6, 2, 2 }, DataType::Float32);
+
+    input0->GetOutputSlot(0).SetTensorInfo(inputInfo0);
+    input1->GetOutputSlot(0).SetTensorInfo(inputInfo1);
+    concat->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    std::vector<std::vector<int32_t>> inputShapes = { { 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    std::vector<std::vector<int32_t>> outputShape = { { 2, 6, 2, 2 } };
+
+    TosaSerializationBasicBlock* basicBlock = GetTosaMappingFromLayer(PolymorphicDowncast<Layer*>(concat));
+    AssertTosaOneToOneMappingBasicBlock(basicBlock,
+                                        inputShapes,
+                                        outputShape,
+                                        Op_CONCAT,
+                                        Attribute_AxisAttribute,
+                                        descriptor,
+                                        LayerType::Concat);
+}
+
 TEST_CASE("GetTosaMapping_ConstantLayer")
 {
     TensorInfo outputInfo = TensorInfo({ 1, 2, 4, 2 }, DataType::Float32, 0.0f, 0, true);
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index e5427eb..0d0d07a 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -43,6 +43,13 @@
             inputInfos.push_back(&infos[1]);
             outputInfos.push_back(&infos[2]);
             break;
+        case LayerType::Concat:
+            for (unsigned int i = 0; i < infos.size() - 1; ++i)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            outputInfos.push_back(&infos.back());
+            break;
         case LayerType::Constant:
             outputInfos.push_back(&infos[0]);
             break;
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index 49f2cb7..a377293 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -7,6 +7,7 @@
 
 #include "backendsCommon/test/AdditionEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
+#include "backendsCommon/test/ConcatEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
 #include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
 #include "backendsCommon/test/SliceEndToEndTestImpl.hpp"
@@ -34,6 +35,47 @@
     AdditionEndToEndFloat16<DataType::Float16>(tosaDefaultBackends);
 }
 
+// Concat
+TEST_CASE("TosaRefConcatEndToEndDim0TestFloat32")
+{
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefConcatEndToEndDim0TestInt32")
+{
+    ConcatDim0EndToEnd<armnn::DataType::Signed32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefConcatEndToEndDim1TestFloat32")
+{
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefConcatEndToEndDim1TestInt32")
+{
+    ConcatDim1EndToEnd<armnn::DataType::Signed32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefConcatEndToEndDim2TestFloat32")
+{
+    ConcatDim2EndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefConcatEndToEndDim2TestInt32")
+{
+    ConcatDim2EndToEnd<armnn::DataType::Signed32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefConcatEndToEndDim3TestFloat32")
+{
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(tosaDefaultBackends);
+}
+
+TEST_CASE("TosaRefConcatEndToEndDim3TestInt32")
+{
+    ConcatDim3EndToEnd<armnn::DataType::Signed32>(tosaDefaultBackends);
+}
+
 // Conv2d
 TEST_CASE("TosaRefConv2dEndtoEndTestFloat32")
 {
diff --git a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
index 3c3abc2..051965f 100644
--- a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp
@@ -61,6 +61,58 @@
     CHECK(!supported);
 }
 
+TEST_CASE("IsLayerSupportedTosaReferenceConcat")
+{
+    TensorShape input0Shape = { 2, 3, 2, 2 };
+    TensorShape input1Shape = { 2, 3, 2, 2 };
+    TensorShape outputShape = { 2, 6, 2, 2 };
+    TensorInfo input0Info(input0Shape, DataType::Float32);
+    TensorInfo input1Info(input1Shape, DataType::Float32);
+    TensorInfo outputInfo(outputShape, DataType::Float32);
+
+    OriginsDescriptor descriptor;
+    std::vector<TensorShape> shapes = {input0Shape, input1Shape} ;
+    unsigned int concatAxis = 1;
+    descriptor = CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatAxis);
+
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Concat,
+                                                     {input0Info, input1Info, outputInfo},
+                                                     descriptor,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedTosaReferenceConcatUnsupported")
+{
+    TensorShape input0Shape = { 2, 3, 2, 2 };
+    TensorShape input1Shape = { 2, 3, 2, 2 };
+    TensorShape outputShape = { 2, 6, 2, 2 };
+    TensorInfo input0Info(input0Shape, armnn::DataType::QAsymmU8);
+    TensorInfo input1Info(input1Shape, armnn::DataType::QAsymmU8);
+    TensorInfo outputInfo(outputShape, armnn::DataType::QAsymmU8);
+
+    OriginsDescriptor descriptor;
+    std::vector<armnn::TensorShape> shapes = {input0Shape, input1Shape} ;
+    unsigned int concatAxis = 1;
+    descriptor = armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatAxis);
+
+    TosaRefLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Concat,
+                                                     {input0Info, input1Info, outputInfo},
+                                                     descriptor,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(!supported);
+}
+
 TEST_CASE("IsLayerSupportedTosaReferenceConstant")
 {
     TensorInfo outputInfo({1,1,3,4}, DataType::Float32);