Revert "IVGCVSW-3808 Deprecation notices for old ElementwiseBinary layers"

This reverts commit 52e90bf59ecbe90d33368d8fc1fd120f07658aaf.

Change-Id: I5a0d244593d8e760ee7ba0c9d38c02377e1bdc24
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 44b1699..f634272 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -108,9 +108,7 @@
 #define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName)
 
 DECLARE_LAYER(Activation)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER(Addition)
-ARMNN_NO_DEPRECATE_WARN_END
 DECLARE_LAYER(ArgMinMax)
 DECLARE_LAYER(BatchMatMul)
 DECLARE_LAYER(BatchNormalization)
@@ -129,9 +127,7 @@
 DECLARE_LAYER(DepthwiseConvolution2d)
 DECLARE_LAYER(Dequantize)
 DECLARE_LAYER(DetectionPostProcess)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER(Division)
-ARMNN_NO_DEPRECATE_WARN_END
 DECLARE_LAYER(ElementwiseBinary)
 DECLARE_LAYER(ElementwiseUnary)
 DECLARE_LAYER(FakeQuantization)
@@ -147,17 +143,13 @@
 DECLARE_LAYER(LogSoftmax)
 DECLARE_LAYER(Lstm)
 DECLARE_LAYER(Map)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER(Maximum)
-ARMNN_NO_DEPRECATE_WARN_END
 DECLARE_LAYER(Mean)
 DECLARE_LAYER(MemCopy)
 DECLARE_LAYER(MemImport)
 DECLARE_LAYER(Merge)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER(Minimum)
 DECLARE_LAYER(Multiplication)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER(Normalization)
 DECLARE_LAYER(Output)
 DECLARE_LAYER(Pad)
@@ -182,9 +174,7 @@
 DECLARE_LAYER(Stack)
 DECLARE_LAYER(StandIn)
 DECLARE_LAYER(StridedSlice)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER(Subtraction)
-ARMNN_NO_DEPRECATE_WARN_END
 DECLARE_LAYER(Switch)
 DECLARE_LAYER(Transpose)
 DECLARE_LAYER(TransposeConvolution2d)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 837b42e..9ebb67b 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -213,16 +213,12 @@
 
 IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return pNetworkImpl->AddAdditionLayer(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return pNetworkImpl->AddMultiplicationLayer(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
@@ -312,23 +308,17 @@
 
 IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return pNetworkImpl->AddDivisionLayer(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return pNetworkImpl->AddSubtractionLayer(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return pNetworkImpl->AddMaximumLayer(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
@@ -355,9 +345,7 @@
 
 IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return pNetworkImpl->AddMinimumLayer(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
@@ -1996,30 +1984,22 @@
 
 IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return m_Graph->AddLayer<MaximumLayer>(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return m_Graph->AddLayer<MinimumLayer>(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return m_Graph->AddLayer<AdditionLayer>(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return m_Graph->AddLayer<MultiplicationLayer>(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
@@ -2258,16 +2238,12 @@
 
 IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return m_Graph->AddLayer<DivisionLayer>(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return m_Graph->AddLayer<SubtractionLayer>(name);
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c6bf085..03642ce 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -43,7 +43,7 @@
 
     IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                           const char* name = nullptr);
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
+
     IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
 
     IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
@@ -93,7 +93,6 @@
                                                     const ConstTensor& anchors,
                                                     const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
     IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
 
     IConnectableLayer* AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor& elementwiseBinaryDescriptor,
@@ -133,15 +132,12 @@
                                     const LstmInputParams& params,
                                     const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
     IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
 
     IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
     IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
     IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
 
     IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
@@ -212,7 +208,6 @@
     IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                             const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use AddElementwiseBinaryLayer instead", "24.02")
     IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
 
     IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index cae96ad..7117c14 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,12 +27,10 @@
     return factory.CreateWorkload(LayerType::Addition, descriptor, PrepInfoAndDesc(descriptor));
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 AdditionLayer* AdditionLayer::Clone(Graph& graph) const
 {
     return CloneBase<AdditionLayer>(graph, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 void AdditionLayer::ExecuteStrategy(IStrategy &strategy) const
 {
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index cd20ff5..6980677 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -10,8 +10,7 @@
 namespace armnn
 {
 /// This layer represents an addition operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
-      AdditionLayer : public ElementwiseBaseLayer
+class AdditionLayer : public ElementwiseBaseLayer
 {
 public:
     /// Makes a workload for the Addition type.
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index db9f93d..e4e2a7d 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,12 +27,10 @@
     return factory.CreateWorkload(LayerType::Division, descriptor, PrepInfoAndDesc(descriptor));
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DivisionLayer* DivisionLayer::Clone(Graph& graph) const
 {
     return CloneBase<DivisionLayer>(graph, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 void DivisionLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index bad96ea..398a947 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -11,8 +11,7 @@
 {
 
 /// This layer represents a division operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
-      DivisionLayer : public ElementwiseBaseLayer
+class DivisionLayer : public ElementwiseBaseLayer
 {
 public:
     /// Makes a workload for the Division type.
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 79c49b5..17e8b44 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -13,8 +13,7 @@
 /// NOTE: this is an abstract class to encapsulate the element wise operations, it does not implement:
 /// std::unique_ptr<IWorkload> Layer::CreateWorkload(const IWorkloadFactory& factory) const = 0;
 /// Layer* Clone(Graph& graph) const = 0;
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
-      ElementwiseBaseLayer : public Layer
+class ElementwiseBaseLayer : public Layer
 {
 public:
     /// Check if the input tensor shape(s)
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 6e180a2..f074cf9 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,12 +26,10 @@
     return factory.CreateWorkload(LayerType::Maximum, descriptor, PrepInfoAndDesc(descriptor));
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 MaximumLayer* MaximumLayer::Clone(Graph& graph) const
 {
     return CloneBase<MaximumLayer>(graph, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 void MaximumLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 31b773e..2b113a4 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -11,8 +11,7 @@
 {
 
 /// This layer represents a maximum operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
-      MaximumLayer : public ElementwiseBaseLayer
+class MaximumLayer : public ElementwiseBaseLayer
 {
 public:
     /// Makes a workload for the Maximum type.
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 061794c..f3661f9 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,12 +27,10 @@
     return factory.CreateWorkload(LayerType::Minimum, descriptor, PrepInfoAndDesc(descriptor));
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 MinimumLayer* MinimumLayer::Clone(Graph& graph) const
 {
     return CloneBase<MinimumLayer>(graph, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 void MinimumLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 795d317..17ef55e 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -11,8 +11,7 @@
 {
 
 /// This layer represents a minimum operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
-      MinimumLayer : public ElementwiseBaseLayer
+class MinimumLayer : public ElementwiseBaseLayer
 {
 public:
     /// Makes a workload for the Minimum type.
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index cc66947..bcc77dc 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,12 +27,10 @@
     return factory.CreateWorkload(LayerType::Multiplication, descriptor, PrepInfoAndDesc(descriptor));
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
 {
     return CloneBase<MultiplicationLayer>(graph, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 void MultiplicationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index c1ddb3a..2dea822 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -11,8 +11,7 @@
 {
 
 /// This layer represents a multiplication operation.
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
-      MultiplicationLayer : public ElementwiseBaseLayer
+class MultiplicationLayer : public ElementwiseBaseLayer
 {
 public:
     /// Makes a workload for the Multiplication type.
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 19e4d5a..0e92013 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,12 +27,10 @@
     return factory.CreateWorkload(LayerType::Subtraction, descriptor, PrepInfoAndDesc(descriptor));
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
 {
     return CloneBase<SubtractionLayer>(graph, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 void SubtractionLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 6d2a2c5..86d5f9e 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -11,9 +11,7 @@
 {
 
 /// This layer represents a subtraction operation.
-
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ElementwiseBinaryLayer instead.", "24.02")
-      SubtractionLayer : public ElementwiseBaseLayer
+class SubtractionLayer : public ElementwiseBaseLayer
 {
 public:
     /// Makes a workload for the Subtraction type.
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index ff42ab8..f839004 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -339,9 +339,7 @@
 
     armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
-    ARMNN_NO_DEPRECATE_WARN_END
     head->GetOutputHandler().SetTensorInfo(info);
 
     graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
@@ -357,16 +355,14 @@
         ->GetOutputHandler().SetTensorInfo(info);
 
     // Check graph layer sequence before inserting convert layers
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CHECK(CheckSequence(graph.cbegin(),
-                        graph.cend(),
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::MemCopyLayer>,
-                        &IsLayerOfType<armnn::FloorLayer>,
-                        &IsLayerOfType<armnn::AdditionLayer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
-    ARMNN_NO_DEPRECATE_WARN_END
+                             graph.cend(),
+                             &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::MemCopyLayer>,
+                             &IsLayerOfType<armnn::FloorLayer>,
+                             &IsLayerOfType<armnn::AdditionLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
 
     // Check layers have Float16 DataType
     for (auto& layer : graph)
@@ -409,21 +405,19 @@
     }
 
     // Check sequence of layers after inserting convert layers
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CHECK(CheckSequence(graph.cbegin(),
-                        graph.cend(),
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                        &IsLayerOfType<armnn::MemCopyLayer>,
-                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                        &IsLayerOfType<armnn::FloorLayer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
-                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                        &IsLayerOfType<armnn::AdditionLayer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
-                        &IsLayerOfType<armnn::OutputLayer>));
-    ARMNN_NO_DEPRECATE_WARN_END
+                             graph.cend(),
+                             &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                             &IsLayerOfType<armnn::MemCopyLayer>,
+                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                             &IsLayerOfType<armnn::FloorLayer>,
+                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                             &IsLayerOfType<armnn::AdditionLayer>,
+                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
 }
 
 void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 6b3fe0f..e0d3a22 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -10,6 +10,7 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
+#include <armnn/TypesUtils.hpp>
 
 #include <armnn/profiling/ArmNNProfiling.hpp>
 
@@ -18,6 +19,9 @@
 
 #include <test/ProfilingTestUtils.hpp>
 
+#include <HeapProfiling.hpp>
+#include <LeakChecking.hpp>
+
 #ifdef WITH_VALGRIND
 #include <valgrind/memcheck.h>
 #endif
@@ -72,9 +76,7 @@
 
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1304,9 +1306,7 @@
 
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1349,9 +1349,7 @@
 
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1394,9 +1392,7 @@
 
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1439,9 +1435,7 @@
 
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
@@ -1489,9 +1483,7 @@
 
     auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
     auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     auto addLayer    = testNetwork->AddAdditionLayer("add layer");
-    ARMNN_NO_DEPRECATE_WARN_END
     auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
 
     TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 };
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index c33b248..7b5d73a 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -181,9 +181,7 @@
 
 TEST_CASE("AdditionTest")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("ArgMinMaxTest")
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 4fcb476..e0fd5fe 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -1054,7 +1054,7 @@
     auto layerX2 = graph.AddLayer<InputLayer>(1, "layerX2");
     auto layerM1 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM1");
     auto layerM2 = graph.AddLayer<ActivationLayer>(activationDefaults, "layerM2");
-    auto layerM3 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "layerM3");
+    auto layerM3 = graph.AddLayer<AdditionLayer>("layerM3");
     auto layerX3 = graph.AddLayer<OutputLayer>(0, "layerX3");
 
     //  X1  X2
@@ -1081,7 +1081,7 @@
                     [](const Layer & l)
                     {
                         bool toSelect = (l.GetType() == LayerType::Activation
-                                         || l.GetType() == LayerType::ElementwiseBinary);
+                                         || l.GetType() == LayerType::Addition);
                         return toSelect;
                     });
 
@@ -1772,7 +1772,7 @@
     auto m0 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m0");
     auto x1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x1");
     auto m1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m1");
-    auto m2 = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "m2");
+    auto m2 = graph.AddLayer<AdditionLayer>("m2");
     auto x2 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x2");
 
     x0->GetOutputSlot(0).Connect(m0->GetInputSlot(0));
@@ -1872,7 +1872,7 @@
 bool ReplaceConstantMultiplicationWithDepthwise(SubgraphView& subgraph,
                                                 IConnectableLayer* layer)
 {
-    if (layer->GetType() == LayerType::ElementwiseBinary)
+    if (layer->GetType() == LayerType::Multiplication)
     {
         IInputSlot* patternSubgraphInput = &layer->GetInputSlot(0);
         IInputSlot* patternSubgraphConstant = &layer->GetInputSlot(1);
@@ -1937,12 +1937,12 @@
 bool ReplaceTestMultiplication(SubgraphView& subgraph,
                            IConnectableLayer* layer)
 {
-    if (layer->GetType() == LayerType::ElementwiseBinary)
+    if (layer->GetType() == LayerType::Multiplication)
     {
 
         switch (layer->GetType())
         {
-            case LayerType::ElementwiseBinary:
+            case LayerType::Multiplication:
                 return ReplaceConstantMultiplicationWithDepthwise(subgraph, layer);
                 break;
             default:
@@ -1993,7 +1993,7 @@
     auto constant = graph.AddLayer<ConstantLayer>("const");
 
     constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
-    IConnectableLayer* mul      = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul,  "mul");
+    IConnectableLayer* mul      = graph.AddLayer<MultiplicationLayer>("mul");
     IConnectableLayer* output   = graph.AddLayer<OutputLayer>(0, "output");
 
     // Create connections between layers
@@ -2015,10 +2015,7 @@
     // Check the WorkingCopy is as expected before replacement
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input,
-                                  LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
-                                  LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
                                          {
                                              CHECK((expectedSorted[idx] == l->GetType()));
@@ -2212,7 +2209,7 @@
     auto constant = graph.AddLayer<ConstantLayer>("const");
 
     constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
-    IConnectableLayer* mul      = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul,  "mul");
+    IConnectableLayer* mul      = graph.AddLayer<MultiplicationLayer>("mul");
     IConnectableLayer* output   = graph.AddLayer<OutputLayer>(0, "output");
 
     // Create connections between layers
@@ -2233,10 +2230,7 @@
 
     // Check the WorkingCopy is as expected before replacement
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input,
-                                  LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
-                                  LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
                                          {
                                              CHECK((expectedSorted[idx] == l->GetType()));
@@ -2291,7 +2285,7 @@
     auto constant = graph.AddLayer<ConstantLayer>("const");
 
     constant->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
-    IConnectableLayer* mul      = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul,  "mul");
+    IConnectableLayer* mul      = graph.AddLayer<MultiplicationLayer>("mul");
     IConnectableLayer* output   = graph.AddLayer<OutputLayer>(0, "output");
 
     // Create connections between layers
@@ -2312,10 +2306,7 @@
     // Check the WorkingCopy is as expected before replacement
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
-    LayerType expectedSorted[] = {LayerType::Input,
-                                  LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
-                                  LayerType::Output};
+    LayerType expectedSorted[] = {LayerType::Input, LayerType::Constant, LayerType::Multiplication, LayerType::Output};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
                                          {
                                              CHECK((expectedSorted[idx] == l->GetType()));
@@ -2355,7 +2346,7 @@
     auto constant = graph.AddLayer<ConstantLayer>("const");
 
     constant->m_LayerOutput     = std::make_shared<ScopedTensorHandle>(constTensor);
-    IConnectableLayer* mul      = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Mul,  "mul");
+    IConnectableLayer* mul      = graph.AddLayer<MultiplicationLayer>("mul");
     armnn::ViewsDescriptor splitterDesc(2,4);
     IConnectableLayer* split    = graph.AddLayer<SplitterLayer>(splitterDesc, "split");
     IConnectableLayer* abs      = graph.AddLayer<ActivationLayer>(ActivationFunction::Abs, "abs");
@@ -2420,7 +2411,7 @@
     CHECK(workingCopy.GetIConnectableLayers().size() == 4);
     int idx=0;
     LayerType expectedSorted[] = {LayerType::Constant,
-                                  LayerType::ElementwiseBinary,
+                                  LayerType::Multiplication,
                                   LayerType::Splitter,
                                   LayerType::Activation};
     workingCopy.ForEachIConnectableLayer([&idx, &expectedSorted](const IConnectableLayer* l)
@@ -2541,7 +2532,7 @@
     Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
     Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
     Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
-    Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
+    Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
 
     Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
     Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2592,7 +2583,7 @@
 
         // GetWorkingCopy() has caused address pointer of convolution layer to change.
         // Finding new address pointer...
-        if (layer->GetType() == LayerType::ElementwiseBinary)
+        if (layer->GetType() == LayerType::Addition)
         {
             addCopyLayer = layer;
         }
@@ -2643,7 +2634,7 @@
     Layer* convLayer = graph.AddLayer<Convolution2dLayer>(Convolution2dDescriptor(), "conv");
     Layer* reluLayer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "activation");
     Layer* constLayer = graph.AddLayer<ConstantLayer>("const");
-    Layer* addLayer = graph.AddLayer<ElementwiseBinaryLayer>(BinaryOperation::Add, "add");
+    Layer* addLayer = graph.AddLayer<AdditionLayer>("add");
 
     Layer* outputLayer1 = graph.AddLayer<OutputLayer>(0, "output1");
     Layer* outputLayer2 = graph.AddLayer<OutputLayer>(1, "output2");
@@ -2669,7 +2660,7 @@
     {
         // GetWorkingCopy() has caused address pointer of convolution layer to change.
         // Finding new address pointer...
-        if (layer->GetType() == LayerType::ElementwiseBinary)
+        if (layer->GetType() == LayerType::Addition)
         {
             addCopyLayer = layer;
         }
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index eb488a5..497c36b 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -34,40 +34,31 @@
 
 TEST_SUITE("TestNameOnlyLayerVisitor")
 {
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
-ARMNN_NO_DEPRECATE_WARN_END
 TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
 TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
+
 }
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 2ccbc94..3b89171 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -1,10 +1,11 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "LayersFwd.hpp"
 
+#include <Network.hpp>
 #include <ResolveType.hpp>
 #include <armnn/INetwork.hpp>
 #include <GraphUtils.hpp>
@@ -237,7 +238,6 @@
     }
 };
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct MultiplicationTest
 {
@@ -272,9 +272,7 @@
         return {};
     }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct AdditionTest
 {
@@ -309,9 +307,7 @@
         return {};
     }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct SubtractionTest
 {
@@ -346,9 +342,7 @@
         return {};
     }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 struct DivisionTest
 {
@@ -383,7 +377,6 @@
         return {};
     }
 };
-ARMNN_NO_DEPRECATE_WARN_END
 
 template<typename LayerTest,
          DataType ArmnnType>
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 3998ee7..6ddc971 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -32,9 +32,7 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const additionLayer = network->AddAdditionLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer0->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
@@ -359,9 +357,7 @@
     armnn::INetworkPtr network(armnn::INetwork::Create());
     armnn::IConnectableLayer* input = network->AddInputLayer(0);
     armnn::IConnectableLayer* constant = network->AddConstantLayer(constTensor, layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* add = network->AddAdditionLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* output = network->AddOutputLayer(0);
 
     input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -931,9 +927,7 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const divisionLayer = network->AddDivisionLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer0->GetOutputSlot(0).Connect(divisionLayer->GetInputSlot(0));
@@ -1633,9 +1627,7 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const maximumLayer = network->AddMaximumLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer0->GetOutputSlot(0).Connect(maximumLayer->GetInputSlot(0));
@@ -1860,9 +1852,7 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const minimumLayer = network->AddMinimumLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer0->GetOutputSlot(0).Connect(minimumLayer->GetInputSlot(0));
@@ -1888,9 +1878,7 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const multiplicationLayer = network->AddMultiplicationLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer0->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
@@ -2748,9 +2736,7 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const subtractionLayer = network->AddSubtractionLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer0->GetOutputSlot(0).Connect(subtractionLayer->GetInputSlot(0));
@@ -2959,9 +2945,7 @@
 
     armnn::INetworkPtr network(armnn::INetwork::Create());
     armnn::IConnectableLayer* input = network->AddInputLayer(0);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* add = network->AddAdditionLayer();
-    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* constant = network->AddConstantLayer(constTensor, layerName.c_str());
     armnn::IConnectableLayer* output = network->AddOutputLayer(0);
 
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index b16f14d..691adbf 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -174,9 +174,8 @@
                                                                     armnn::Graph& graph)
 {
     // Creates the layer we're testing.
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    auto* const layer = graph.AddLayer<SubtractionLayer>("layer");
-    ARMNN_NO_DEPRECATE_WARN_END
+    SubtractionLayer* const layer = graph.AddLayer<SubtractionLayer>("layer");
+
     auto activationDesc = std::make_shared<ActivationDescriptor>();
     activationDesc->m_A        = 10.0f;
     activationDesc->m_B        = 5.0f;
@@ -234,9 +233,8 @@
                                                                        armnn::Graph& graph)
 {
     // Creates the layer we're testing.
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    auto* const layer = graph.AddLayer<MultiplicationLayer>("layer");
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    MultiplicationLayer* const layer = graph.AddLayer<MultiplicationLayer>("layer");
+
     auto activationDesc = std::make_shared<ActivationDescriptor>();
     activationDesc->m_A        = 10.0f;
     activationDesc->m_B        = 5.0f;
@@ -291,9 +289,8 @@
                                                                  armnn::Graph& graph)
 {
     // Creates the layer we're testing.
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    auto* const layer = graph.AddLayer<AdditionLayer>("layer");
-    ARMNN_NO_DEPRECATE_WARN_END
+    AdditionLayer* const layer = graph.AddLayer<AdditionLayer>("layer");
+
     auto activationDesc = std::make_shared<ActivationDescriptor>();
     activationDesc->m_A        = 10.0f;
     activationDesc->m_B        = 5.0f;
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index fb7a4e1..599d353 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -77,10 +77,8 @@
                              ActivationDescriptor& activationDesc,
                              std::string name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddAdditionLayer(name.c_str());
     LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
-    ARMNN_NO_DEPRECATE_WARN_END
 
     FuseLayer(optimizationViews,
               baseLayer,
@@ -98,10 +96,8 @@
                                 ActivationDescriptor& activationDesc,
                                 std::string name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddSubtractionLayer(name.c_str());
     LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
-    ARMNN_NO_DEPRECATE_WARN_END
 
     FuseLayer(optimizationViews,
               baseLayer,
@@ -119,10 +115,8 @@
                              ActivationDescriptor& activationDesc,
                              std::string name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddDivisionLayer(name.c_str());
     LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
-    ARMNN_NO_DEPRECATE_WARN_END
 
     FuseLayer(optimizationViews,
               baseLayer,
@@ -140,10 +134,8 @@
                                    ActivationDescriptor& activationDesc,
                                    std::string name)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* replacement = optimizationViews.GetINetwork()->AddMultiplicationLayer(name.c_str());
     LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
-    ARMNN_NO_DEPRECATE_WARN_END
 
     FuseLayer(optimizationViews,
               baseLayer,
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index e793b44..00e549c 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -43,7 +43,6 @@
                                                 const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
                                               const WorkloadInfo& /*info*/) const override
     { return nullptr; }
@@ -104,7 +103,6 @@
                                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
                                               const WorkloadInfo& /*info*/) const override
     { return nullptr; }
@@ -154,7 +152,6 @@
                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
@@ -175,12 +172,10 @@
                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
                                                     const WorkloadInfo& /*info*/) const override
     { return nullptr; }
@@ -253,7 +248,6 @@
                                                   const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
                                                  const WorkloadInfo& /*info*/) const override
     { return nullptr; }
diff --git a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp
index a0d1af6..f335218 100644
--- a/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/AdditionEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -30,7 +30,8 @@
 
     TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
 
-    IConnectableLayer* addition = network->AddElementwiseBinaryLayer(BinaryOperation::Add, "addition");
+
+    IConnectableLayer* addition = network->AddAdditionLayer("addition");
     IConnectableLayer* inputX = network->AddInputLayer(0, "inputX");
     IConnectableLayer* inputY = network->AddInputLayer(1, "inputY");
     IConnectableLayer* output = network->AddOutputLayer(0, "output");
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 9213f0e..795fc13 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -39,7 +39,7 @@
 
     IConnectableLayer* input = net->AddInputLayer(0);
     IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData));
-    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add);
+    IConnectableLayer* add = net->AddAdditionLayer();
     IConnectableLayer* output = net->AddOutputLayer(0);
 
     input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -176,8 +176,7 @@
         for (unsigned int i = 0; i < out.size(); ++i)
         {
             CHECK_MESSAGE(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true,
-                    "Position: " << i <<" Actual output: " << static_cast<uint32_t>(out[i]) <<
-                    ". Expected output:" << static_cast<uint32_t>(it.second[i]));
+                    "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
 
         }
     }
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 5475762..5b95d3c 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -622,9 +622,7 @@
 // Every entry in the armnn::LayerType enum must be accounted for below.
 DECLARE_LAYER_POLICY_2_PARAM(Activation)
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER_POLICY_1_PARAM(Addition)
-ARMNN_NO_DEPRECATE_WARN_END
 
 DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)
 
@@ -696,21 +694,15 @@
 
 DECLARE_LAYER_POLICY_MAP_PARAM(Map, void)
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER_POLICY_1_PARAM(Maximum)
-ARMNN_NO_DEPRECATE_WARN_END
 
 DECLARE_LAYER_POLICY_2_PARAM(Mean)
 
 DECLARE_LAYER_POLICY_1_PARAM(Merge)
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER_POLICY_1_PARAM(Minimum)
-ARMNN_NO_DEPRECATE_WARN_END
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
-ARMNN_NO_DEPRECATE_WARN_END
 
 DECLARE_LAYER_POLICY_2_PARAM(Normalization)
 
@@ -734,9 +726,7 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER_POLICY_1_PARAM(Division)
-ARMNN_NO_DEPRECATE_WARN_END
 
 DECLARE_LAYER_POLICY_1_PARAM(Rank)
 
@@ -762,9 +752,7 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
-ARMNN_NO_DEPRECATE_WARN_END
 
 DECLARE_LAYER_POLICY_2_PARAM(Reduce)
 
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index 665358b..ff32179 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -263,9 +263,7 @@
     armnn::IConnectableLayer* input = net->AddInputLayer(0, "inLayer0");
     armnn::IConnectableLayer* input1 = net->AddInputLayer(1, "inLayer1");
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* addition = net->AddAdditionLayer("addLayer");
-    ARMNN_NO_DEPRECATE_WARN_END
 
     armnn::IConnectableLayer* output = net->AddOutputLayer(0, "outLayer");
 
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 7303733..f5a6c42 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -133,7 +133,6 @@
 }
 
 // Convenience function to add an addition layer to a graph
-ARMNN_NO_DEPRECATE_WARN_BEGIN
 AdditionLayer* AddAdditionaLayer(Graph& graph,
                                  LayerNameToLayerMap& layersInGraph,
                                  const std::string& layerName,
@@ -145,7 +144,6 @@
     layersInGraph.insert(std::make_pair(additionLayer->GetName(), additionLayer));
     return additionLayer;
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 // Convenience function to check that the given substitution matches the specified expected values
 void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution,
@@ -752,9 +750,7 @@
                                                                "conv2 layer unoptimizable", outputInfo);
     Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor,
                                                                "conv3 layer", outputInfo);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     AdditionLayer* const addLayer = AddAdditionaLayer(graph, layersInGraph, "add layer", outputInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
     Layer* const outputLayer = AddOutputLayer(graph, "output layer");
 
     // Connect the network
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 46ba9cb..a10b6fb 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -461,7 +461,6 @@
                             }
                             else if (base.GetType() == LayerType::Addition)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
 
                                 arm_compute::Status status = ClAdditionValidate(
@@ -480,11 +479,9 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::Division)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
 
                                 arm_compute::Status status = ClDivisionWorkloadValidate(
@@ -503,11 +500,9 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::Multiplication)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
 
                                 arm_compute::Status status = ClMultiplicationWorkloadValidate(
@@ -526,11 +521,9 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::Subtraction)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
 
                                 arm_compute::Status status = ClSubtractionValidate(
@@ -549,7 +542,6 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::ElementwiseBinary)
                             {
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index e1266c8..89bcf9b 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -346,9 +346,7 @@
         case LayerType::Dequantize:
             return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::Division:
-            ARMNN_NO_DEPRECATE_WARN_BEGIN
             return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
-            ARMNN_NO_DEPRECATE_WARN_END
         case LayerType::ElementwiseBinary:
         {
             auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));
@@ -476,22 +474,16 @@
                                                       infos[2],
                                                       reasonIfUnsupported);
         case LayerType::Maximum:
-            ARMNN_NO_DEPRECATE_WARN_BEGIN
             return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
-            ARMNN_NO_DEPRECATE_WARN_END
         case LayerType::Mean:
             return IsMeanSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
         case LayerType::Minimum:
-            ARMNN_NO_DEPRECATE_WARN_BEGIN
             return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
-            ARMNN_NO_DEPRECATE_WARN_END
         case LayerType::Multiplication:
-            ARMNN_NO_DEPRECATE_WARN_BEGIN
             return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
-            ARMNN_NO_DEPRECATE_WARN_END
         case LayerType::Normalization:
             return IsNormalizationSupported(infos[0],
                                             infos[1],
@@ -612,9 +604,7 @@
                                            *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
         case LayerType::Subtraction:
-            ARMNN_NO_DEPRECATE_WARN_BEGIN
             return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
-            ARMNN_NO_DEPRECATE_WARN_END
         case LayerType::Transpose:
             return IsTransposeSupported(infos[0],
                                         infos[1],
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index fa28141..2d784e3 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -30,7 +30,6 @@
                                const ActivationDescriptor& descriptor,
                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     bool IsAdditionSupported(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
@@ -129,14 +128,13 @@
                                                 const Optional<TensorInfo>& biases,
                                                 Optional<std::string&> reason = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     bool IsDivisionSupported(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsElementwiseUnarySupported(const TensorInfo& input,
-                                     const TensorInfo& output,
+                                     const TensorInfo& output,
                                      const ElementwiseUnaryDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -202,7 +200,6 @@
                          const LstmInputParamsInfo& paramsInfo,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     bool IsMaximumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
@@ -213,13 +210,11 @@
                          const MeanDescriptor& descriptor,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     bool IsMinimumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     bool IsMultiplicationSupported(const TensorInfo& input0,
                                    const TensorInfo& input1,
                                    const TensorInfo& output,
@@ -330,7 +325,6 @@
                                  const StridedSliceDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use CreateElementwiseBinary instead", "24.02")
     bool IsSubtractionSupported(const TensorInfo& input0,
                                 const TensorInfo& input1,
                                 const TensorInfo& output,
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index c68f4ce..cea2aa3 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -313,7 +313,6 @@
                             }
                             else if (base.GetType() == LayerType::Addition)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);
 
                                 arm_compute::Status status = NeonAdditionWorkloadValidate(
@@ -332,11 +331,9 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::Division)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);
 
                                 arm_compute::Status status = NeonDivisionWorkloadValidate(
@@ -355,11 +352,9 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::Multiplication)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);
 
                                 arm_compute::Status status = NeonMultiplicationWorkloadValidate(
@@ -378,11 +373,9 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::Subtraction)
                             {
-                                ARMNN_NO_DEPRECATE_WARN_BEGIN
                                 SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);
 
                                 arm_compute::Status status = NeonSubtractionWorkloadValidate(
@@ -401,7 +394,6 @@
                                     untouched.erase(baseLayer->GetGuid());
                                     untouched.erase(activationLayer->GetGuid());
                                 }
-                                ARMNN_NO_DEPRECATE_WARN_END
                             }
                             else if (base.GetType() == LayerType::ElementwiseBinary)
                             {
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 19881c2..66718cc 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -128,86 +128,70 @@
 
 TEST_CASE("CreateAdditionFloatWorkload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
                                       AdditionQueueDescriptor,
                                       AdditionLayer,
                                       DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_CASE("CreateSubtractionFloat16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
                                       SubtractionLayer,
                                       DataType::Float16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 #endif
 
 TEST_CASE("CreateSubtractionFloatWorkload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
                                       SubtractionLayer,
                                       DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateSubtractionUint8Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
                                       SubtractionLayer,
                                       DataType::QAsymmU8>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_CASE("CreateMultiplicationFloat16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
                                       MultiplicationLayer,
                                       DataType::Float16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 #endif
 
 TEST_CASE("CreateMultiplicationFloatWorkload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
                                       MultiplicationLayer,
                                       DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateMultiplicationUint8Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
                                       MultiplicationLayer,
                                       DataType::QAsymmU8>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateDivisionFloatWorkloadTest")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
                                       DivisionQueueDescriptor,
                                       DivisionLayer,
                                       armnn::DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 3bba0b7..c46a9e5 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -142,182 +142,146 @@
 
 TEST_CASE("CreateAdditionFloatWorkload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
         AdditionLayer,
         armnn::DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateAdditionUint8Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
         AdditionLayer,
         armnn::DataType::QAsymmU8>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateAdditionInt16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
         AdditionLayer,
         armnn::DataType::QSymmS16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateAdditionInt32Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
             AdditionQueueDescriptor,
             AdditionLayer,
             armnn::DataType::Signed32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateSubtractionFloat32Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
         SubtractionLayer,
         armnn::DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateSubtractionFloat16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
         SubtractionLayer,
         armnn::DataType::Float16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateSubtractionUint8Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
         SubtractionLayer,
         armnn::DataType::QAsymmU8>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateSubtractionInt16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
         SubtractionLayer,
         armnn::DataType::QSymmS16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateSubtractionInt32Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
             SubtractionQueueDescriptor,
             SubtractionLayer,
             armnn::DataType::Signed32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateMultiplicationFloatWorkload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
         MultiplicationLayer,
         armnn::DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateMultiplicationUint8Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
         MultiplicationLayer,
         armnn::DataType::QAsymmU8>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateMultiplicationInt16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
         MultiplicationLayer,
         armnn::DataType::QSymmS16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateMultiplicationInt32Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
             MultiplicationQueueDescriptor,
             MultiplicationLayer,
             armnn::DataType::Signed32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateDivisionFloat32Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
         DivisionLayer,
         armnn::DataType::Float32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateDivisionFloat16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
         DivisionLayer,
         armnn::DataType::Float16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateDivisionUint8Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
         DivisionLayer,
         armnn::DataType::QAsymmU8>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateDivisionInt16Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
         DivisionLayer,
         armnn::DataType::QSymmS16>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 TEST_CASE("CreateDivisionInt32Workload")
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
             DivisionQueueDescriptor,
             DivisionLayer,
             armnn::DataType::Signed32>();
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>