IVGCVSW-3076 Add ConcatLayer methods to public API

Adds IsConcatSupported and AddConcatLayer alongside the existing Merger
entry points. The new methods forward to the current Merger implementations,
the Merger variants are marked with ARMNN_DEPRECATED_MSG, internal callers
(the Caffe/TfLite/Tf parsers, deserializer and quantizer) are switched to
the Concat names, and the remaining Merger uses are wrapped in
ARMNN_NO_DEPRECATE_WARN_BEGIN/END.

!android-nn-driver:1120

Change-Id: I5192fa3deb4ea9766d38ad0bf4dfbfa0b4924c41
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
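
Illustrative note (not part of the patch): a minimal sketch of how a client could
exercise the new public Concat API introduced by this change. INetwork::Create,
AddInputLayer, AddOutputLayer, OriginsDescriptor and the slot Connect calls are the
existing Arm NN API; AddConcatLayer is the method added here, and the change also
adds a free function armnn::IsConcatSupported as its support check. The shapes and
descriptor contents below are placeholder assumptions.

// Minimal sketch, assuming the new INetwork::AddConcatLayer added by this change.
#include <armnn/ArmNN.hpp>

armnn::INetworkPtr BuildConcatNetwork()
{
    armnn::INetworkPtr net = armnn::INetwork::Create();

    armnn::IConnectableLayer* input0 = net->AddInputLayer(0);
    armnn::IConnectableLayer* input1 = net->AddInputLayer(1);

    // Two views of four dimensions each, matching the descriptors used in the tests.
    // Real code would typically fill in the view origins, e.g. via
    // CreateMergerDescriptorForConcatenation as the existing tests do.
    armnn::OriginsDescriptor concatDesc(2, 4);

    // New in this change; forwards to the deprecated AddMergerLayer internally.
    armnn::IConnectableLayer* concat = net->AddConcatLayer(concatDesc, "concat");
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(concat->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(concat->GetInputSlot(1));
    concat->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return net;
}

Because the Concat entry points simply forward to the Merger implementations,
behaviour is unchanged while callers migrate off the deprecated names.
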
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 831a846..47a0d3e 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -130,6 +130,18 @@
                                descriptor);
 }
 
+bool IsConcatSupported(const BackendId& backend,
+                       std::vector<const TensorInfo*> inputs,
+                       const TensorInfo& output,
+                       const OriginsDescriptor& descriptor,
+                       char* reasonIfUnsupported,
+                       size_t reasonIfUnsupportedMaxLength)
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(backend, inputs, output, descriptor, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool IsConstantSupported(const BackendId& backend,
                          const TensorInfo& output,
                          char* reasonIfUnsupported,
@@ -386,6 +398,7 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
 }
 
+ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
 bool IsMergerSupported(const BackendId& backend,
                        std::vector<const TensorInfo*> inputs,
                        const TensorInfo& output,
@@ -394,7 +407,10 @@
                        size_t reasonIfUnsupportedMaxLength)
 {
     BOOST_ASSERT(inputs.size() > 0);
+
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
+    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 bool IsMinimumSupported(const BackendId& backend,
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c3f29d4..087ec0f 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -602,6 +602,14 @@
     return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
 }
 
+IConnectableLayer* Network::AddConcatLayer(const OriginsDescriptor& mergerDescriptor,
+                                           const char* name)
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return AddMergerLayer(mergerDescriptor, name);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
@@ -762,7 +770,7 @@
 }
 
 IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
-    const char* name)
+                                           const char* name)
 {
     return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
 }
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 660ca87..a569a7c 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -37,6 +37,9 @@
     IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                               const char* name = nullptr) override;
 
+    IConnectableLayer* AddConcatLayer(const OriginsDescriptor& mergerDescriptor,
+                                      const char* name = nullptr) override;
+
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const Optional<ConstTensor>& biases,
@@ -115,8 +118,9 @@
     IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
         const char* name = nullptr) override;
 
+    ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
     IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
-        const char* name = nullptr) override;
+                                      const char* name = nullptr) override;
 
     IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override;
 
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index c1e7c2c..38e33cf 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -267,7 +267,7 @@
                                         const OriginsDescriptor& mergerDescriptor,
                                         const char* name)
 {
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddMergerLayer(mergerDescriptor, name);
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(mergerDescriptor, name);
     RecordLayer(layer, newLayer);
     SetQuantizedInputConnections(layer, newLayer);
 }
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index dd8eb77..155304b 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -254,7 +254,9 @@
     // Adds a merger layer.
     armnn::OriginsDescriptor mergerDesc(2, 4);
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* mergerLayer = net.AddMergerLayer(mergerDesc, "merger layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     BOOST_TEST(mergerLayer);
 
     softmaxLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 2103de0..4f22317 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1283,7 +1283,9 @@
     IConnectableLayer* input2 = network->AddInputLayer(2);
 
     OriginsDescriptor descriptor(3, 1);
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* merger = network->AddMergerLayer(descriptor);
+    ARMNN_NO_DEPRECATE_WARN_END
 
     IConnectableLayer* output0 = network->AddOutputLayer(3);
 
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index dbadb75..f94906d 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -239,7 +239,9 @@
     TestMergerLayerVisitor visitor(descriptor, layerName);
     Network net;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer *const layer = net.AddMergerLayer(descriptor, layerName);
+    ARMNN_NO_DEPRECATE_WARN_END
     layer->Accept(visitor);
 }
 
@@ -251,7 +253,9 @@
     TestMergerLayerVisitor visitor(descriptor);
     Network net;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer *const layer = net.AddMergerLayer(descriptor);
+    ARMNN_NO_DEPRECATE_WARN_END
     layer->Accept(visitor);
 }
 
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index 3cc0fb9..90579e6 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -589,7 +589,7 @@
     outputShape.set_dim(1, mergeDimSizes[1]);
 
     // Finally add the merge layer
-    IConnectableLayer* mergerLayer = m_Network->AddMergerLayer(mergeDesc, layerParam.name().c_str());
+    IConnectableLayer* mergerLayer = m_Network->AddConcatLayer(mergeDesc, layerParam.name().c_str());
 
     if (!mergerLayer)
     {
@@ -1325,7 +1325,7 @@
     }
     mergeDimSizes[concatDim] = mergeDim;
 
-    armnn::IConnectableLayer* concatlayer = m_Network->AddMergerLayer(concatDescriptor, layerParam.name().c_str());
+    armnn::IConnectableLayer* concatlayer = m_Network->AddConcatLayer(concatDescriptor, layerParam.name().c_str());
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(boost::numeric_cast<int>(i)));
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 8b790f7..b7d45e0 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -209,7 +209,7 @@
     m_ParserFunctions[Layer_MeanLayer]                   = &Deserializer::ParseMean;
     m_ParserFunctions[Layer_MinimumLayer]                = &Deserializer::ParseMinimum;
     m_ParserFunctions[Layer_MergeLayer]                  = &Deserializer::ParseMerge;
-    m_ParserFunctions[Layer_MergerLayer]                 = &Deserializer::ParseMerger;
+    m_ParserFunctions[Layer_MergerLayer]                 = &Deserializer::ParseConcat;
     m_ParserFunctions[Layer_MultiplicationLayer]         = &Deserializer::ParseMultiplication;
     m_ParserFunctions[Layer_NormalizationLayer]          = &Deserializer::ParseNormalization;
     m_ParserFunctions[Layer_PadLayer]                    = &Deserializer::ParsePad;
@@ -1213,7 +1213,7 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
-void Deserializer::ParseMerger(GraphPtr graph, unsigned int layerIndex)
+void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
     CHECK_LOCATION();
@@ -1244,7 +1244,7 @@
     }
     descriptor.SetConcatAxis(mergerDescriptor->concatAxis());
 
-    IConnectableLayer* layer = m_Network->AddMergerLayer(descriptor, layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddConcatLayer(descriptor, layerName.c_str());
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index dfa5b06..c647ac3 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -82,6 +82,7 @@
     void ParseAdd(GraphPtr graph, unsigned int layerIndex);
     void ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex);
     void ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex);
+    void ParseConcat(GraphPtr graph, unsigned int layerIndex);
     void ParseConstant(GraphPtr graph, unsigned int layerIndex);
     void ParseConvolution2d(GraphPtr graph, unsigned int layerIndex);
     void ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int layerIndex);
@@ -98,7 +99,6 @@
     void ParseMean(GraphPtr graph, unsigned int layerIndex);
     void ParseMinimum(GraphPtr graph, unsigned int layerIndex);
     void ParseMerge(GraphPtr graph, unsigned int layerIndex);
-    void ParseMerger(GraphPtr graph, unsigned int layerIndex);
     void ParseMultiplication(GraphPtr graph, unsigned int layerIndex);
     void ParseNormalization(GraphPtr graph, unsigned int layerIndex);
     void ParseLstm(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index 9cdeea2..5c61971 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -10,6 +10,7 @@
 * Addition
 * BatchToSpaceNd
 * BatchNormalization
+* Concat
 * Constant
 * Convolution2d
 * DepthwiseConvolution2d
@@ -26,7 +27,6 @@
 * Maximum
 * Mean
 * Merge
-* Merger
 * Minimum
 * Multiplication
 * Normalization
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index b022668..4b3a09e 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1276,7 +1276,9 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayerOne = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayerTwo = network->AddInputLayer(1);
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const mergerLayer = network->AddMergerLayer(descriptor, layerName.c_str());
+    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayerOne->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index fdb3812..036a881 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1692,7 +1692,7 @@
     }
 
     auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
-    IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
 
     BOOST_ASSERT(layer != nullptr);
 
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index d7cfba8..e5948d5 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -2120,7 +2120,7 @@
 
     // Update the output shape
     mergeDims[concatDim] = mergeDim;
-    armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());
+    armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
 
     layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
 
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index b37fa33..7760c07 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -68,6 +68,16 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                         const TensorInfo& output,
+                                         const OriginsDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 0c32a64..88d5792 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -37,6 +37,11 @@
                                    const BatchToSpaceNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -173,6 +178,7 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index d9774b0..9ed0e29 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -554,7 +554,9 @@
 
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
 
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             result = layerSupportObject->IsMergerSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::Multiplication:
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp
index 2511bd4..35ab2bc 100644
--- a/src/backends/backendsCommon/test/MergerTestImpl.hpp
+++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp
@@ -33,7 +33,9 @@
     descriptor = CreateMergerDescriptorForConcatenation(inputShapes.begin(),
                                                         inputShapes.end(),
                                                         concatAxis);
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger");
+    ARMNN_NO_DEPRECATE_WARN_END
 
     for (unsigned int i = 0; i < inputShapes.size(); ++i)
     {
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 73c9e49..a557870 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -185,6 +185,16 @@
                                    descriptor);
 }
 
+bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                       const TensorInfo& output,
+                                       const OriginsDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index e9a9e68..b634d46 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -36,6 +36,11 @@
                                    const BatchToSpaceNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -138,6 +143,7 @@
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index c257dd3..9bd48cf 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -142,6 +142,16 @@
                                    descriptor);
 }
 
+bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                         const TensorInfo& output,
+                                         const OriginsDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index a5aae0b..8312bb9 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -31,6 +31,11 @@
                                        const BatchNormalizationDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -101,6 +106,7 @@
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index a1d8e7d..f79c152 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -293,6 +293,16 @@
                                       &TrueFunc<>));
 }
 
+bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                        const TensorInfo& output,
+                                        const OriginsDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 9b1a95c..a4ae01e 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -36,6 +36,11 @@
                                    const BatchToSpaceNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -162,6 +167,7 @@
                          const MeanDescriptor& descriptor,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,