IVGCVSW-3119 Rename MergerLayer to ConcatLayer

!android-nn-driver:1210

Change-Id: I940b3b9e421c92bfd55ae996f7bc54ac077f2604
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
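
Note on the user-facing effect of this rename: network-construction code moves
from the deprecated AddMergerLayer to AddConcatLayer. A minimal sketch, using
only names that appear in this patch (the surrounding setup is illustrative):

    #include <armnn/ArmNN.hpp>

    armnn::INetworkPtr net = armnn::INetwork::Create();
    armnn::OriginsDescriptor desc(2);     // descriptor for concatenating two inputs
    // Old: net->AddMergerLayer(desc, "merger");  // still compiles, but deprecated
    armnn::IConnectableLayer* concat = net->AddConcatLayer(desc, "concat");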
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 9fcb496..71b1745 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -73,9 +73,7 @@
                                          const OriginsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
-    ARMNN_NO_DEPRECATE_WARN_END
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
 bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
@@ -286,7 +284,7 @@
                                          const OriginsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
 }
 
 bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index ea84c0b..9bb95f6 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -378,26 +378,26 @@
 }
 
 //---------------------------------------------------------------
-void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
-    ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);
+    ValidateNumOutputs(workloadInfo, "ConcatQueueDescriptor", 1);
 
     if (m_Inputs.size() <= 0)
     {
-        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
+        throw InvalidArgumentException("ConcatQueueDescriptor: At least one input needs to be provided.");
     }
     if (m_Outputs.size() <= 0)
     {
-        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
+        throw InvalidArgumentException("ConcatQueueDescriptor: At least one output needs to be provided.");
     }
 
     if (workloadInfo.m_InputTensorInfos.size() <= 0)
     {
-        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
+        throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo input needs to be provided.");
     }
     if (workloadInfo.m_OutputTensorInfos.size() <= 0)
     {
-        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
+        throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo output needs to be provided.");
     }
 
     if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
@@ -413,7 +413,7 @@
     if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
     {
         throw InvalidArgumentException(
-            "MergerQueueDescriptor: Number of split windows "
+            "ConcatQueueDescriptor: Number of split windows "
             "has to match number of workloadInfo.m_InputTensorInfos. "
             "Number of windows: " +
             to_string(m_ViewOrigins.size()) +
@@ -428,7 +428,7 @@
         ViewOrigin const& e = m_ViewOrigins[w];
         if (e.m_Origin.size() != outputDims)
         {
-            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
+            throw InvalidArgumentException("ConcatQueueDescriptor: Window origin have to "
                                            "have the same dimensionality as the output tensor. "
                                            "Window origin (index: " +
                                            to_string(w) + ") has " + to_string(e.m_Origin.size()) +
@@ -442,7 +442,7 @@
             if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                 > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
             {
-                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
+                throw InvalidArgumentException("ConcatQueueDescriptor: Window extent coordinates have to "
                                                "be smaller or equal than the size of the output in that coord.");
             }
         }
@@ -463,11 +463,11 @@
     {
         ValidateDataTypes(workloadInfo.m_InputTensorInfos[i],
                           supportedTypes,
-                          "MergerQueueDescriptor");
+                          "ConcatQueueDescriptor");
     }
     ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                       {workloadInfo.m_InputTensorInfos[0].GetDataType()},
-                      "MergerQueueDescriptor");
+                      "ConcatQueueDescriptor");
 }
 
 //---------------------------------------------------------------
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 689c6d2..3e33b94 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -9,6 +9,7 @@
 
 #include <InternalTypes.hpp>
 
+#include <armnn/Deprecated.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/Exceptions.hpp>
 #include <armnn/Types.hpp>
@@ -87,8 +88,8 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
-// Merger layer workload data.
-struct MergerQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
+// Concat layer workload data.
+struct ConcatQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
 {
     struct ViewOrigin
     {
@@ -106,6 +107,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+// Deprecated. Use ConcatQueueDescriptor instead
+using MergerQueueDescriptor = ConcatQueueDescriptor;
+
 // Activation layer workload data.
 struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
 {
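
Note: because MergerQueueDescriptor is now a plain type alias, existing backend
code that names the old descriptor stays source-compatible. A minimal sketch
(the static_assert is illustrative, not part of the patch):

    #include <type_traits>

    static_assert(std::is_same<armnn::MergerQueueDescriptor,
                               armnn::ConcatQueueDescriptor>::value,
                  "MergerQueueDescriptor must remain an alias for ConcatQueueDescriptor");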
diff --git a/src/backends/backendsCommon/WorkloadDataFwd.hpp b/src/backends/backendsCommon/WorkloadDataFwd.hpp
index 9fbd81b..abee316 100644
--- a/src/backends/backendsCommon/WorkloadDataFwd.hpp
+++ b/src/backends/backendsCommon/WorkloadDataFwd.hpp
@@ -12,7 +12,7 @@
 struct QueueDescriptorWithParameters;
 struct SoftmaxQueueDescriptor;
 struct SplitterQueueDescriptor;
-struct MergerQueueDescriptor;
+struct ConcatQueueDescriptor;
 struct ActivationQueueDescriptor;
 struct FullyConnectedQueueDescriptor;
 struct PermuteQueueDescriptor;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 7631071..f026e1e 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -512,9 +512,9 @@
                                                           reason);
             break;
         }
-        case LayerType::Merger:
+        case LayerType::Concat:
         {
-            auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer);
+            auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);
 
             // Get vector of all inputs.
             auto getTensorInfo = [&dataType](const InputSlot& slot)
@@ -535,9 +535,7 @@
 
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
 
-            ARMNN_NO_DEPRECATE_WARN_BEGIN
-            result = layerSupportObject->IsMergerSupported(inputPtrs, output, cLayer->GetParameters(), reason);
-            ARMNN_NO_DEPRECATE_WARN_END
+            result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
             break;
         }
         case LayerType::Multiplication:
@@ -816,7 +816,7 @@
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
     return std::unique_ptr<IWorkload>();
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 0b0ba7d..11c36eb 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -61,7 +61,7 @@
     virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                             const WorkloadInfo& Info) const;
 
-    virtual std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+    virtual std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                     const WorkloadInfo&          info) const;
 
     virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8050a0a..508fc77 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -30,7 +30,7 @@
     LayerTests.hpp
     LstmTestImpl.hpp
     NormTestImpl.hpp
-    MergerTestImpl.hpp
+    ConcatTestImpl.hpp
     MockBackend.cpp
     MockBackend.hpp
     MockBackendId.hpp
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/ConcatTestImpl.hpp
similarity index 88%
rename from src/backends/backendsCommon/test/MergerTestImpl.hpp
rename to src/backends/backendsCommon/test/ConcatTestImpl.hpp
index 8483cf0..ded3857 100644
--- a/src/backends/backendsCommon/test/MergerTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatTestImpl.hpp
@@ -18,8 +18,8 @@
 {
 
 template<typename armnn::DataType DataType>
-INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
-                                const TensorShape& outputShape,
+INetworkPtr CreateConcatNetwork(const std::vector<TensorShape>& inputShapes,
+                                const TensorShape &outputShape,
                                 unsigned int concatAxis,
                                 const float qScale = 1.0f,
                                 const int32_t qOffset = 0)
@@ -33,26 +33,24 @@
     descriptor = CreateDescriptorForConcatenation(inputShapes.begin(),
                                                   inputShapes.end(),
                                                   concatAxis);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger");
-    ARMNN_NO_DEPRECATE_WARN_END
+    IConnectableLayer* concat = net->AddConcatLayer(descriptor, "concat");
 
     for (unsigned int i = 0; i < inputShapes.size(); ++i)
     {
         TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
         IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
-        Connect(input, merger, inputTensorInfo, 0, i);
+        Connect(input, concat, inputTensorInfo, 0, i);
     }
 
     TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
-    Connect(merger, output, outputTensorInfo, 0, 0);
+    Connect(concat, output, outputTensorInfo, 0, 0);
 
     return net;
 }
 
 template<armnn::DataType ArmnnType>
-void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim0EndToEnd(const std::vector<BackendId>& backends)
 {
     using namespace armnn;
     using T = ResolveType<ArmnnType>;
@@ -62,7 +60,7 @@
     const TensorShape& outputShape = { 4, 3, 2, 2 };
 
     // Builds up the structure of the network
-    INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+    INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
     BOOST_TEST_CHECKPOINT("create a network");
 
@@ -116,7 +114,7 @@
 }
 
 template<armnn::DataType ArmnnType>
-void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim1EndToEnd(const std::vector<BackendId>& backends)
 {
     using namespace armnn;
     using T = ResolveType<ArmnnType>;
@@ -126,7 +124,7 @@
     const TensorShape& outputShape = { 2, 6, 2, 2 };
 
     // Builds up the structure of the network
-    INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+    INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
     BOOST_TEST_CHECKPOINT("create a network");
 
@@ -180,7 +178,7 @@
 }
 
 template<armnn::DataType ArmnnType>
-void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim2EndToEnd(const std::vector<BackendId>& backends)
 {
     using namespace armnn;
     using T = ResolveType<ArmnnType>;
@@ -190,7 +188,7 @@
     const TensorShape& outputShape = { 2, 3, 4, 2 };
 
     // Builds up the structure of the network
-    INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+    INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
     BOOST_TEST_CHECKPOINT("create a network");
 
@@ -244,7 +242,7 @@
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim3EndToEnd(const std::vector<BackendId>& backends)
 {
     using namespace armnn;
 
@@ -253,7 +251,7 @@
     const TensorShape& outputShape = { 2, 3, 2, 4 };
 
     // Builds up the structure of the network
-    INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+    INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
     BOOST_TEST_CHECKPOINT("create a network");
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 3ff7376..7161464 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -135,19 +135,19 @@
 };
 
 template<>
-struct DummyLayer<armnn::MergerLayer>
+struct DummyLayer<armnn::ConcatLayer>
 {
     DummyLayer()
     {
         armnn::OriginsDescriptor desc(2);
-        m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
+        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
 
     }
     ~DummyLayer()
     {
         dummyGraph.EraseLayer(m_Layer);
     }
-    armnn::MergerLayer* m_Layer;
+    armnn::ConcatLayer* m_Layer;
 };
 
 template<>
@@ -322,6 +322,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
 
+DECLARE_LAYER_POLICY_2_PARAM(Concat)
+
 DECLARE_LAYER_POLICY_1_PARAM(Constant)
 
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
@@ -364,10 +366,6 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(Merge)
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-DECLARE_LAYER_POLICY_2_PARAM(Merger)
-ARMNN_NO_DEPRECATE_WARN_END
-
 DECLARE_LAYER_POLICY_1_PARAM(Minimum)
 
 DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
@@ -422,7 +420,7 @@
 }
 
 template<>
-unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
+unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
 {
     boost::ignore_unused(layer);
     return 2;
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index c84a530..402e86d 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1362,10 +1362,10 @@
     );
 
     std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
-    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
-    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
@@ -1381,7 +1381,7 @@
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::MergerQueueDescriptor data;
+    armnn::ConcatQueueDescriptor data;
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
@@ -3554,7 +3554,7 @@
 
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::MergerQueueDescriptor queueDescriptor;
+    armnn::ConcatQueueDescriptor queueDescriptor;
     armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
     queueDescriptor.m_Parameters = viewsDescriptor;
 
@@ -6625,10 +6625,10 @@
     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
@@ -6644,7 +6644,7 @@
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::MergerQueueDescriptor data;
+    armnn::ConcatQueueDescriptor data;
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
@@ -6759,10 +6759,10 @@
     );
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
 
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -6780,7 +6780,7 @@
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
 
-    armnn::MergerQueueDescriptor data;
+    armnn::ConcatQueueDescriptor data;
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
@@ -6892,10 +6892,10 @@
     }));
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
 
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -6913,7 +6913,7 @@
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
 
-    armnn::MergerQueueDescriptor data;
+    armnn::ConcatQueueDescriptor data;
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
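
Note: the ViewOrigin windows in these tests describe where each input is
placed inside the output tensor. A minimal sketch of the pattern, assuming two
3D inputs of extent 2 along dimension 0 (as in the tests above):

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // input[0] occupies rows 0-1
    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // input[1] occupies rows 2-3

    armnn::ConcatQueueDescriptor data;
    data.m_ViewOrigins.push_back(armnn::ConcatQueueDescriptor::ViewOrigin(wOrigin1));
    data.m_ViewOrigins.push_back(armnn::ConcatQueueDescriptor::ViewOrigin(wOrigin2));
    // ConcatQueueDescriptor::Validate (WorkloadData.cpp above) rejects origins
    // whose dimensionality or extent does not fit the output tensor.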
diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/backends/backendsCommon/test/QuantizeHelper.hpp
index b3b0631..a0c6553 100644
--- a/src/backends/backendsCommon/test/QuantizeHelper.hpp
+++ b/src/backends/backendsCommon/test/QuantizeHelper.hpp
@@ -2,6 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
 #include <armnn/ArmNN.hpp>
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 067cca8..94bef9b 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -234,7 +234,7 @@
 }
 
 
-BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow)
+BOOST_AUTO_TEST_CASE(ConcatQueueDescriptor_Validate_WrongWindow)
 {
     constexpr unsigned int inputNum = 1;
     constexpr unsigned int inputChannels = 3;
@@ -256,7 +256,7 @@
     inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
     outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
 
-    MergerQueueDescriptor invalidData;
+    ConcatQueueDescriptor invalidData;
     WorkloadInfo          invalidInfo;
 
     AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
@@ -264,7 +264,7 @@
 
     // Invalid, since it has only 3 dimensions while the input tensor is 4d.
     std::vector<unsigned int> wOrigin = {0, 0, 0};
-    armnn::MergerQueueDescriptor::ViewOrigin window(wOrigin);
+    armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
     invalidData.m_ViewOrigins.push_back(window);
 
     BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not "
@@ -273,18 +273,18 @@
 
     // Invalid, since window extends past the boundary of output tensor.
     std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
-    armnn::MergerQueueDescriptor::ViewOrigin window3(wOrigin3);
+    armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
     invalidData.m_ViewOrigins[0] = window3;
     BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight");
     BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
 
     std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
-    armnn::MergerQueueDescriptor::ViewOrigin window4(wOrigin4);
+    armnn::ConcatQueueDescriptor::ViewOrigin window4(wOrigin4);
     invalidData.m_ViewOrigins[0] = window4;
 
     std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
-    armnn::MergerQueueDescriptor::ViewOrigin window5(wOrigin4);
+    armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin5);
     invalidData.m_ViewOrigins.push_back(window5);
 
     BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs.");
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index dfac289..78ac0e6 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -189,12 +189,43 @@
 
 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                        const TensorInfo& output,
-                                       const OriginsDescriptor& descriptor,
+                                       const ConcatDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
-    ARMNN_NO_DEPRECATE_WARN_END
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if(concatInnerAxis < 3) // Width, height, or channels
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       inputs,
+                                       output,
+                                       descriptor);
+    }
+    else if (concatInnerAxis == 3)
+    {
+        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
+        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+            {
+                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
+    {
+        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
+        return false;
+    }
 }
 
 bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
@@ -442,43 +473,10 @@
 
 bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                        const TensorInfo& output,
-                                       const OriginsDescriptor& descriptor,
+                                       const MergerDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
-    {
-        SetValueChecked(reasonIfUnsupported, "Cl Merger: Concat axis > Number of dimensions.");
-        return false;
-    }
-
-    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
-    if(concatInnerAxis < 3) // Width, height, or channels
-    {
-        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
-                                       reasonIfUnsupported,
-                                       inputs,
-                                       output,
-                                       descriptor);
-    }
-    else if (concatInnerAxis == 3)
-    {
-        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
-        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
-        for (auto& input : inputs)
-        {
-            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
-            {
-                SetValueChecked(reasonIfUnsupported, "Cl Merger: Types and quantization parameters must match.");
-                return false;
-            }
-        }
-        return true; // Sub-tensors support concat along batch
-    }
-    else // > 4 dimensions not supported.
-    {
-        SetValueChecked(reasonIfUnsupported, "Cl Merger: Maximum of 4 dimensions supported.");
-        return false;
-    }
+    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
 }
 
 bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index fca0bfd..64c1079 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -38,7 +38,7 @@
 
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const ConcatDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConstantSupported(const TensorInfo& output,
@@ -146,7 +146,7 @@
     ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const MergerDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index e7cf191..214b88d 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -265,7 +265,7 @@
     return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                                   const WorkloadInfo&          info) const
 {
     return MakeWorkload<ClConcatWorkload>(descriptor, info);
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index e00672f..2722171 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -97,7 +97,7 @@
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 7f08b80..dc884e0 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -551,30 +551,30 @@
 }
 
 template <typename armnn::DataType DataType>
-static void ClSplitterMergerTest()
+static void ClSplitterConcatTest()
 {
     // Tests that it is possible to decide which output of the splitter layer
-    // should be lined to which input of the merger layer.
+    // should be linked to which input of the concat layer.
     // We test that it is possible to specify the 0th output
-    // of the splitter to be the 1st input to the merger and the 1st output of the splitter  to be 0th input
-    // of the merger.
+    // of the splitter to be the 1st input to the concat, and the 1st output of the splitter to be the 0th input
+    // of the concat.
 
     Graph graph;
     ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
 
     auto workloads =
-        CreateSplitterMergerWorkloadTest<ClSplitterWorkload, ClConcatWorkload, DataType>
+        CreateSplitterConcatWorkloadTest<ClSplitterWorkload, ClConcatWorkload, DataType>
             (factory, graph);
 
     auto wlSplitter = std::move(workloads.first);
-    auto wlMerger = std::move(workloads.second);
+    auto wlConcat = std::move(workloads.second);
 
     //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
     armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
     armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
-    armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
-    armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+    armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+    armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
     BOOST_TEST(sOut0);
     BOOST_TEST(sOut1);
@@ -593,14 +593,14 @@
     BOOST_TEST(validSubTensorParents);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloatWorkload)
 {
-    ClSplitterMergerTest<armnn::DataType::Float32>();
+    ClSplitterConcatTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16Workload)
 {
-    ClSplitterMergerTest<armnn::DataType::Float16>();
+    ClSplitterConcatTest<armnn::DataType::Float16>();
 }
 
 
@@ -801,17 +801,17 @@
     ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QuantisedAsymm8>();
 }
 
-template <typename MergerWorkloadType, armnn::DataType DataType>
-static void ClCreateMergerWorkloadTest(std::initializer_list<unsigned int> outputShape,
+template <typename ConcatWorkloadType, armnn::DataType DataType>
+static void ClCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
                                        unsigned int concatAxis)
 {
     Graph graph;
     ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
 
-    auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
+    auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
 
-    MergerQueueDescriptor queueDescriptor = workload->GetData();
+    ConcatQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle0  = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle1  = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -821,34 +821,34 @@
     BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
 {
-    ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
 {
-    ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
 {
-    ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
 {
-    ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
 {
-    ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
 {
-    ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 9c010fc..3235f26 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -4,7 +4,7 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/MergerTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
 #include <backendsCommon/test/ArithmeticTestImpl.hpp>
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 
@@ -19,34 +19,34 @@
     ConstantUsageFloat32Test(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Test)
 {
-    MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test)
 {
-    MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
 {
-    MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test)
 {
-    MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
 {
-    MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test)
 {
-    MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index ee4ba6b..fb28946 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -19,7 +19,7 @@
 
 namespace
 {
-size_t CalcAxis(const MergerDescriptor& desc)
+size_t CalcAxis(const OriginsDescriptor& desc)
 {
     return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
 }
@@ -27,7 +27,7 @@
 
 arm_compute::Status ClConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                              const TensorInfo& output,
-                                             const MergerDescriptor& descriptor)
+                                             const OriginsDescriptor& descriptor)
 {
     std::vector<arm_compute::TensorInfo> aclInputs;
     for (const TensorInfo* input : inputs)
@@ -46,8 +46,8 @@
     return arm_compute::CLConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
 }
 
-ClConcatWorkload::ClConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
-: BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
+: BaseWorkload<ConcatQueueDescriptor>(descriptor, info)
 {
     bool allInputsAreSubtensors = true;
 
@@ -56,7 +56,7 @@
     {
         if (!input->GetParent())
         {
-            // Non sub-tensor input found so we need to execute the merger function
+            // Non sub-tensor input found so we need to execute the concat function
             allInputsAreSubtensors = false;
             break;
         }
@@ -64,7 +64,7 @@
 
     if (allInputsAreSubtensors)
     {
-        // Can skip configuring the merger function since it's not executed
+        // Can skip configuring the concat function since it's not executed
         return;
     }
 
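
Note: the sub-tensor short-circuit above means that when every input handle is
already a sub-tensor view of the output, the concatenated data is materialised
in place and there is nothing for the workload to run. A sketch of the control
flow (the condition name is taken from the workload above):

    // if (allInputsAreSubtensors) { return; }  // Execute() becomes a no-op;
    // otherwise the CL concat function is configured to copy the inputs.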
diff --git a/src/backends/cl/workloads/ClConcatWorkload.hpp b/src/backends/cl/workloads/ClConcatWorkload.hpp
index 106193d..c34de9f 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.hpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.hpp
@@ -14,12 +14,12 @@
 
 arm_compute::Status ClConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                              const TensorInfo& output,
-                                             const MergerDescriptor& descriptor);
+                                             const OriginsDescriptor& descriptor);
 
-class ClConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class ClConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
 {
 public:
-    ClConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info);
+    ClConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info);
 
     void Execute() const override;
 
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index fd9aac5..e84eb79 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -146,12 +146,41 @@
 
 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                          const TensorInfo& output,
-                                         const OriginsDescriptor& descriptor,
+                                         const ConcatDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-     ARMNN_NO_DEPRECATE_WARN_BEGIN
-     return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
-     ARMNN_NO_DEPRECATE_WARN_END
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if(concatInnerAxis < 3) // Width, height, or channels
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       inputs,
+                                       output,
+                                       descriptor);
+    }
+    else if (concatInnerAxis == 3)
+    {
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+            {
+                SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
+        return false;
+    }
 }
 
 bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
@@ -326,41 +355,10 @@
 
 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                          const TensorInfo& output,
-                                         const OriginsDescriptor& descriptor,
+                                         const MergerDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
-    {
-        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
-        return false;
-    }
-
-    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
-    if(concatInnerAxis < 3) // Width, height, or channels
-    {
-        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
-                                       reasonIfUnsupported,
-                                       inputs,
-                                       output,
-                                       descriptor);
-    }
-    else if (concatInnerAxis == 3)
-    {
-        for (auto& input : inputs)
-        {
-            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
-            {
-                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
-                return false;
-            }
-        }
-        return true; // Sub-tensors support concat along batch
-    }
-    else // > 4 dimensions not supported.
-    {
-        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
-        return false;
-    }
+    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
 }
 
 bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 5e8e0bd..dd6ed79 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -33,7 +33,7 @@
 
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const ConcatDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConstantSupported(const TensorInfo& output,
@@ -109,7 +109,7 @@
     ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const MergerDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 3005dae..4b6225f 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -233,7 +233,7 @@
         m_MemoryManager->GetIntraLayerManager());
 }
 
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                                     const WorkloadInfo&          info) const
 {
     return std::make_unique<NeonConcatWorkload>(descriptor, info);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 60dbb90..6a28d12 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -98,7 +98,7 @@
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index b41d62f..8382365 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -504,30 +504,30 @@
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterMerger)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
 {
     // Tests that it is possible to decide which output of the splitter layer
-    // should be lined to which input of the merger layer.
+    // should be linked to which input of the concat layer.
     // We test that it is possible to specify the 0th output
-    // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input
-    // of the merger.
+    // of the splitter to be the 1st input to the concat, and the 1st output of the splitter to be the 0th input
+    // of the concat.
 
     Graph graph;
     NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
 
     auto workloads =
-        CreateSplitterMergerWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
+        CreateSplitterConcatWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
             DataType::Float32>(factory, graph);
 
     auto wlSplitter = std::move(workloads.first);
-    auto wlMerger = std::move(workloads.second);
+    auto wlConcat = std::move(workloads.second);
 
     //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
     armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
     armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
-    armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
-    armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+    armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+    armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
     BOOST_TEST(sOut0);
     BOOST_TEST(sOut1);
@@ -632,17 +632,17 @@
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
 }
 
-template <typename MergerWorkloadType, armnn::DataType DataType>
-static void NeonCreateMergerWorkloadTest(std::initializer_list<unsigned int> outputShape,
+template <typename ConcatWorkloadType, armnn::DataType DataType>
+static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
                                          unsigned int concatAxis)
 {
     Graph graph;
     NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
 
-    auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
+    auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
 
-    MergerQueueDescriptor queueDescriptor = workload->GetData();
+    ConcatQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -652,34 +652,34 @@
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
 {
-    NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
 {
-    NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
 {
-    NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
 {
-    NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
 {
-    NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
 {
-    NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 441efed..15f5fc3 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -4,7 +4,7 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/MergerTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
 #include <backendsCommon/test/ArithmeticTestImpl.hpp>
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 
@@ -93,34 +93,34 @@
                                                                                             expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
 {
-    MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
 {
-    MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
 {
-    MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
 {
-    MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
 {
-    MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
 {
-    MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitDim0EndToEndTest)
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 91f8109..8ea535b 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -19,7 +19,7 @@
 
 namespace
 {
-size_t CalcAxis(const armnn::MergerDescriptor& desc)
+size_t CalcAxis(const armnn::OriginsDescriptor& desc)
 {
     return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
 }
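
CalcAxis bridges two conventions: ArmNN numbers tensor dimensions from the outermost, while the Compute Library indexes from the innermost, so the concat axis is mirrored. A standalone sketch of the flip (plain functions, not the anonymous-namespace helper above):

    #include <cassert>
    #include <cstddef>

    // Mirrors an outermost-first axis index into an innermost-first one.
    std::size_t FlipConcatAxis(std::size_t numDimensions, std::size_t armnnAxis)
    {
        return (numDimensions - armnnAxis) - 1;
    }

    int main()
    {
        // For a 4D tensor, ArmNN axis 1 lands on ACL axis 2, and the
        // innermost ArmNN axis 3 lands on ACL axis 0.
        assert(FlipConcatAxis(4, 1) == 2);
        assert(FlipConcatAxis(4, 3) == 0);
    }
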
@@ -27,7 +27,7 @@
 
 arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                                const TensorInfo& output,
-                                               const MergerDescriptor& descriptor)
+                                               const OriginsDescriptor& descriptor)
 
 {
     std::vector<arm_compute::TensorInfo> aclInputs;
@@ -48,8 +48,8 @@
 }
 
 NeonConcatWorkload::NeonConcatWorkload(
-const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
-        : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+    const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
+        : BaseWorkload<ConcatQueueDescriptor>(descriptor, info)
 {
     bool allInputsAreSubtensors = true;
 
@@ -58,7 +58,7 @@
     {
         if (!input->GetParent())
         {
-            // Non sub-tensor input found so we need to execute the merger function
+            // Non sub-tensor input found, so we need to execute the concat function
             allInputsAreSubtensors = false;
             break;
         }
@@ -66,7 +66,7 @@
 
     if (allInputsAreSubtensors)
     {
-        // Can skip configuring the merger function since it's not executed
+        // Can skip configuring the concat function since it's not executed
         return;
     }
 
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.hpp b/src/backends/neon/workloads/NeonConcatWorkload.hpp
index e5a8d15..bf0733b 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.hpp
@@ -17,14 +17,14 @@
 {
 arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                                const TensorInfo& output,
-                                               const MergerDescriptor& descriptor);
+                                               const OriginsDescriptor& descriptor);
 
-class NeonConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class NeonConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
 {
 public:
-    NeonConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info);
+    NeonConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info);
 
-    using BaseWorkload<MergerQueueDescriptor>::BaseWorkload;
+    using BaseWorkload<ConcatQueueDescriptor>::BaseWorkload;
     void Execute() const override;
 
 private:
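
As on the reference backend, the workload class is parameterised on its queue-descriptor type, which is why the descriptor rename touches every BaseWorkload<> instantiation. A stand-in sketch of the pattern (simplified types, not armnn's BaseWorkload):

    #include <utility>

    // Stand-in sketch: the workload owns its validated descriptor and
    // exposes it via GetData(), as the workload tests rely on.
    template <typename QueueDescriptor>
    class BaseWorkloadSketch
    {
    public:
        explicit BaseWorkloadSketch(QueueDescriptor data) : m_Data(std::move(data)) {}
        virtual ~BaseWorkloadSketch() = default;

        virtual void Execute() const = 0;
        const QueueDescriptor& GetData() const { return m_Data; }

    protected:
        QueueDescriptor m_Data;
    };

    struct DummyDescriptor { int value = 0; };

    struct DummyWorkload : BaseWorkloadSketch<DummyDescriptor>
    {
        // Mirrors the `using BaseWorkload<...>::BaseWorkload;` line above.
        using BaseWorkloadSketch<DummyDescriptor>::BaseWorkloadSketch;
        void Execute() const override {}
    };

    int main()
    {
        DummyWorkload w(DummyDescriptor{ 42 });
        w.Execute();
        return w.GetData().value == 42 ? 0 : 1;
    }
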
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 2adcb10..9a691a6 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -316,18 +316,38 @@
 
 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
-                                        const OriginsDescriptor& descriptor,
+                                        const ConcatDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
-    ARMNN_NO_DEPRECATE_WARN_END
+    ignore_unused(descriptor);
+
+    bool supported = true;
+    std::array<DataType,3> supportedTypes =
+    {
+            DataType::Float32,
+            DataType::QuantisedAsymm8,
+            DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference concatenation: output type not supported");
+    for (const TensorInfo* input : inputs)
+    {
+        supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
+            "Reference concatenation: input type not supported");
+
+        supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
+            "Reference concatenation: input and output types mismatched.");
+    }
+
+    return supported;
 }
 
 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    std::array<DataType,4> supportedTypes = {
+    std::array<DataType,4> supportedTypes =
+    {
         DataType::Float32,
         DataType::Signed32,
         DataType::QuantisedAsymm8,
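
The new IsConcatSupported body uses the rule-folding idiom common in RefLayerSupport: each check ANDs into the running verdict, and failures append to the reason string. A simplified, self-contained sketch of the idiom (stand-in helpers, not armnn's CheckSupportRule and TypeAnyOf):

    #include <array>
    #include <string>
    #include <vector>

    enum class DataType { Float32, QuantisedAsymm8, QuantisedSymm16, Float16 };

    // Stand-in for CheckSupportRule: fold a predicate result into the
    // running verdict and record a message on failure.
    bool Check(bool passed, std::string& reason, const char* message)
    {
        if (!passed) { reason += message; reason += '\n'; }
        return passed;
    }

    bool IsConcatSupportedSketch(const std::vector<DataType>& inputs,
                                 DataType output,
                                 std::string& reason)
    {
        const std::array<DataType, 3> ok = { DataType::Float32,
                                             DataType::QuantisedAsymm8,
                                             DataType::QuantisedSymm16 };
        auto anyOf = [&](DataType t)
        {
            for (DataType o : ok) { if (t == o) { return true; } }
            return false;
        };

        bool supported = true;
        supported &= Check(anyOf(output), reason, "output type not supported");
        for (DataType in : inputs)
        {
            supported &= Check(anyOf(in), reason, "input type not supported");
            supported &= Check(in == output, reason, "input/output types mismatched");
        }
        return supported;
    }
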
@@ -815,31 +835,10 @@
 
 bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
-                                        const OriginsDescriptor& descriptor,
+                                        const MergerDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
-
-    bool supported = true;
-    std::array<DataType,3> supportedTypes =
-    {
-            DataType::Float32,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
-    };
-
-    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
-                                  "Reference concatenation: output type not supported");
-    for (const TensorInfo* input : inputs)
-    {
-        supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
-            "Reference concatenation: input type not supported");
-
-        supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
-            "Reference concatenation: input and output types mismatched.");
-    }
-
-    return supported;
+    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
 }
 
 bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
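
The deprecated overload is kept as a thin forwarder so existing callers still compile while the ARMNN_DEPRECATED_MSG annotation (see the header below) points them at the new name. In standard C++ terms the pattern is roughly this sketch (illustrative names only, not armnn's):

    #include <vector>

    class LayerSupportSketch
    {
    public:
        bool IsConcatSupported(const std::vector<int>& inputs) const
        {
            return !inputs.empty(); // the real type/shape rules live here
        }

        [[deprecated("Use IsConcatSupported instead")]]
        bool IsMergerSupported(const std::vector<int>& inputs) const
        {
            return IsConcatSupported(inputs); // old name forwards to the new one
        }
    };

    int main()
    {
        LayerSupportSketch support;
        return support.IsConcatSupported({ 1, 2 }) ? 0 : 1;
    }
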
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 944061d..8850c6e 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -38,7 +38,7 @@
 
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const ConcatDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConstantSupported(const TensorInfo& output,
@@ -170,7 +170,7 @@
     ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const MergerDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsMemCopySupported(const TensorInfo& input,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 1243328..a21becd 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -245,7 +245,7 @@
     return MakeWorkload<RefL2NormalizationFloat32Workload, NullWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                                    const WorkloadInfo&          info) const
 {
     if (IsFloat16(info))
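
The IsFloat16 branch is truncated by this hunk, so its body is not shown; modelled on the MakeWorkload<..., NullWorkload> line above, a plausible (assumed, not confirmed by this diff) shape of the dispatch is:

    #include <memory>

    struct IWorkloadSketch { virtual ~IWorkloadSketch() = default; };
    struct ConcatWorkloadSketch : IWorkloadSketch {};
    struct NullWorkloadSketch : IWorkloadSketch {};

    // Hypothetical dispatch: hand back a null workload for a data type the
    // backend cannot run, and a concrete workload otherwise.
    std::unique_ptr<IWorkloadSketch> CreateConcatSketch(bool isFloat16)
    {
        if (isFloat16)
        {
            return std::make_unique<NullWorkloadSketch>();
        }
        return std::make_unique<ConcatWorkloadSketch>();
    }

    int main()
    {
        auto workload = CreateConcatSketch(false);
        return workload != nullptr ? 0 : 1;
    }
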
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 985b634..78f6bab 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -115,7 +115,7 @@
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 1c7f8dc..9a4cf14 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -21,7 +21,7 @@
         workloads/FullyConnected.cpp \
         workloads/Gather.cpp \
         workloads/Mean.cpp \
-        workloads/Merger.cpp \
+        workloads/Concatenate.cpp \
         workloads/Pad.cpp \
         workloads/Pooling2d.cpp \
         workloads/RefActivationWorkload.cpp \
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 3f4cc75..a96d656 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -473,28 +473,28 @@
     RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
 }
 
-template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
-static void RefCreateSplitterMergerWorkloadTest()
+template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
+static void RefCreateSplitterConcatWorkloadTest()
 {
     // Tests that it is possible to decide which output of the splitter layer
-    // should be lined to which input of the merger layer.
+    // should be linked to which input of the concat layer.
-    // We tested that is is possible to specify 0th output
-    // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input
-    // of the merger.
+    // We tested that it is possible to specify the 0th output
+    // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be the 0th input
+    // of the concat.
 
     Graph graph;
     RefWorkloadFactory factory;
-    auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
-        (factory, graph);
+    auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
+            (factory, graph);
 
     auto wlSplitter = std::move(workloads.first);
-    auto wlMerger = std::move(workloads.second);
+    auto wlConcat = std::move(workloads.second);
 
     //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
     armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
     armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
-    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
-    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
     BOOST_TEST(sOut0);
     BOOST_TEST(sOut1);
@@ -506,14 +506,14 @@
     BOOST_TEST(validDataPointers);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
 {
-    RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefConcatWorkload, DataType::Float32>();
+    RefCreateSplitterConcatWorkloadTest<RefSplitterFloat32Workload, RefConcatWorkload, DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
 {
-    RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefConcatWorkload, DataType::QuantisedAsymm8>();
+    RefCreateSplitterConcatWorkloadTest<RefSplitterUint8Workload, RefConcatWorkload, DataType::QuantisedAsymm8>();
 }
 
 template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
@@ -671,13 +671,13 @@
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
 }
 
-template <typename MergerWorkloadType, armnn::DataType DataType>
-static void RefCreateMergerWorkloadTest(const armnn::TensorShape& outputShape,
+template <typename ConcatWorkloadType, armnn::DataType DataType>
+static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
                                         unsigned int concatAxis)
 {
     Graph graph;
     RefWorkloadFactory factory;
-    auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
+    auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
 
     CheckInputsOutput(std::move(workload),
                       TensorInfo({ 2, 3, 2, 5 }, DataType),
@@ -685,49 +685,49 @@
                       TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint16Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim2Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim2Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
 {
-    RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
 }
 
 template <typename ConstantWorkloadType, armnn::DataType DataType>
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 6dacfab..2b7fb77 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -7,7 +7,7 @@
 
 #include <backendsCommon/test/DetectionPostProcessTestImpl.hpp>
 #include <backendsCommon/test/GatherEndToEndTestImpl.hpp>
-#include <backendsCommon/test/MergerTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
 #include <backendsCommon/test/ArithmeticTestImpl.hpp>
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 
@@ -396,44 +396,44 @@
                                                                                             expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
 {
-    MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
 {
-    MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
 {
-    MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
 {
-    MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
 {
-    MergerDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
 {
-    MergerDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
 {
-    MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
 {
-    MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 508dfdc..3db0314 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -28,8 +28,8 @@
     Gather.hpp
     LstmUtils.hpp
     Maximum.hpp
-    Merger.hpp
-    Merger.cpp
+    Concatenate.hpp
+    Concatenate.cpp
     Minimum.hpp
     Pad.cpp
     Pad.hpp
diff --git a/src/backends/reference/workloads/Merger.cpp b/src/backends/reference/workloads/Concatenate.cpp
similarity index 94%
rename from src/backends/reference/workloads/Merger.cpp
rename to src/backends/reference/workloads/Concatenate.cpp
index e0b70ee..bb55424 100644
--- a/src/backends/reference/workloads/Merger.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include "Merger.hpp"
+#include "Concatenate.hpp"
 #include "RefWorkloadUtils.hpp"
 #include "Decoders.hpp"
 #include "Encoders.hpp"
@@ -11,7 +11,7 @@
 namespace armnn
 {
 
-void Merger(const MergerQueueDescriptor& data)
+void Concatenate(const ConcatQueueDescriptor& data)
 {
     const TensorInfo& outputInfo0 = GetTensorInfo(data.m_Outputs[0]);
 
@@ -34,7 +34,7 @@
 
         for (unsigned int viewIdx = 0; viewIdx < data.m_ViewOrigins.size(); ++viewIdx)
         {
-            MergerQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx];
+            ConcatQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx];
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
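
Each ViewOrigin gives the output coordinates at which the corresponding input starts, and the input's own shape supplies the extent. A deliberately simplified 1D sketch of that mapping (flat float buffers instead of armnn's encoder/decoder plumbing):

    #include <cstddef>
    #include <vector>

    // Copy each input into the output starting at its view origin.
    void ConcatenateSketch(const std::vector<std::vector<float>>& inputs,
                           const std::vector<std::size_t>& viewOrigins,
                           std::vector<float>& output)
    {
        for (std::size_t viewIdx = 0; viewIdx < inputs.size(); ++viewIdx)
        {
            const std::vector<float>& input = inputs[viewIdx];
            // The view's extent is the size of the corresponding input tensor.
            for (std::size_t i = 0; i < input.size(); ++i)
            {
                output[viewOrigins[viewIdx] + i] = input[i];
            }
        }
    }

    int main()
    {
        // Two 5-element inputs with origins { 0, 5 } land back to back
        // in a 10-element output.
        std::vector<float> output(10, 0.0f);
        ConcatenateSketch({ { 0, 1, 2, 3, 4 }, { 5, 6, 7, 8, 9 } }, { 0, 5 }, output);
        return output[5] == 5.0f ? 0 : 1;
    }
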
diff --git a/src/backends/reference/workloads/Merger.hpp b/src/backends/reference/workloads/Concatenate.hpp
similarity index 80%
rename from src/backends/reference/workloads/Merger.hpp
rename to src/backends/reference/workloads/Concatenate.hpp
index eaa154d..ac82a87 100644
--- a/src/backends/reference/workloads/Merger.hpp
+++ b/src/backends/reference/workloads/Concatenate.hpp
@@ -10,5 +10,5 @@
 
 namespace armnn
 {
-void Merger(const MergerQueueDescriptor& data);
+void Concatenate(const ConcatQueueDescriptor& data);
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefConcatWorkload.cpp b/src/backends/reference/workloads/RefConcatWorkload.cpp
index 9abddc0..152eae9 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.cpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.cpp
@@ -5,7 +5,7 @@
 
 #include "RefConcatWorkload.hpp"
 
-#include "Merger.hpp"
+#include "Concatenate.hpp"
 
 #include "Profiling.hpp"
 
@@ -15,7 +15,7 @@
 void RefConcatWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConcatWorkload_Execute");
-    Merger(m_Data);
+    Concatenate(m_Data);
 }
 
 } //namespace armnn
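
Execute() stays a thin wrapper: a scoped profiling event, then the renamed free function. ARMNN_SCOPED_PROFILING_EVENT is RAII-style; a stand-in timer sketch of the idea (not armnn's profiler):

    #include <chrono>
    #include <cstdio>

    class ScopedEventSketch
    {
    public:
        explicit ScopedEventSketch(const char* name)
            : m_Name(name), m_Start(std::chrono::steady_clock::now()) {}

        ~ScopedEventSketch() // the duration is recorded when the scope ends
        {
            const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now() - m_Start).count();
            std::printf("%s: %lld us\n", m_Name, static_cast<long long>(us));
        }

    private:
        const char* m_Name;
        std::chrono::steady_clock::time_point m_Start;
    };

    void ExecuteSketch()
    {
        ScopedEventSketch event("RefConcatWorkload_Execute");
        // ... the actual Concatenate(m_Data) call would go here ...
    }

    int main() { ExecuteSketch(); }
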
diff --git a/src/backends/reference/workloads/RefConcatWorkload.hpp b/src/backends/reference/workloads/RefConcatWorkload.hpp
index 9fc9c7e..7d0b6b7 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.hpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.hpp
@@ -11,10 +11,10 @@
 namespace armnn
 {
 
-class RefConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class RefConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
 {
 public:
-    using BaseWorkload<MergerQueueDescriptor>::BaseWorkload;
+    using BaseWorkload<ConcatQueueDescriptor>::BaseWorkload;
     virtual void Execute() const override;
 };
 
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 20649d9..6ffec2b 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -38,7 +38,7 @@
 #include "RefPooling2dUint8Workload.hpp"
 #include "BatchNormImpl.hpp"
 #include "Activation.hpp"
-#include "Merger.hpp"
+#include "Concatenate.hpp"
 #include "RefSpaceToBatchNdWorkload.hpp"
 #include "RefSplitterFloat32Workload.hpp"
 #include "RefStridedSliceWorkload.hpp"