IVGCVSW-5985 Remove deprecated code

 * Removes the deprecated AddLayer and IsLayerSupported functions
 * Marks the whole LayerVisitor class as deprecated, not just the
   constructor. This required wrapping all Accept functions in a
   no-deprecate-warn macro because the LayerVisitor is used as a
   parameter there
 * Removes usage of the deprecated LayerVisitor and replaces it
   with ExecuteStrategy. This required a few structural changes
   in the unit tests
 * Adds a default implementation for IStrategy called StrategyBase
 * Changes pyarmnn to use the non-deprecated constructor for
   INetworkProperties and adds a related unit test
 * Marks usage of deprecated code in pyarmnn as deprecated. This
   required extending INetworkProperties to allow backwards
   compatibility
 * Removes deprecated functions from CpuAcc, GpuAcc and Ref backends

Note: This patch breaks compatibility with backends that are not
      updated in this patch

!android-nn-driver:6325

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Id13b6f37a74d26eadeda2da1dc92915e725ed5a5
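
For downstream code that still implements ILayerVisitor, the replacement is IStrategy
driven through INetwork::ExecuteStrategy. A minimal migration sketch, assuming a
hypothetical NameCollector class (only the ExecuteStrategy signature below is taken
from this patch):

```cpp
#include <armnn/INetwork.hpp>
#include <armnn/IStrategy.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <string>
#include <vector>

// Hypothetical example: collect layer names. The single ExecuteStrategy
// callback replaces the per-layer VisitXxxLayer functions of ILayerVisitor.
class NameCollector : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(layer, descriptor, constants, id);
        m_Names.emplace_back(name ? name : "");
    }

    std::vector<std::string> m_Names;
};

// Usage: network->ExecuteStrategy(collector); replaces network->Accept(visitor);
```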
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0989e12..bebee7f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -193,6 +193,7 @@
     include/armnn/NetworkFwd.hpp
     include/armnn/Optional.hpp
     include/armnn/QuantizedLstmParams.hpp
+    include/armnn/StrategyBase.hpp
     include/armnn/Tensor.hpp
     include/armnn/TensorFwd.hpp
     include/armnn/Threadpool.hpp
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 80676de..0bd37dc 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -28,11 +28,6 @@
 
     bool IsBackendRegistered() const;
 
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsAbsSupported(const TensorInfo& input,
-                        const TensorInfo& output,
-                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -164,12 +159,6 @@
                                      const ElementwiseUnaryDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsEqualSupported(const TensorInfo& input0,
-                          const TensorInfo& input1,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsFakeQuantizationSupported(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
@@ -190,24 +179,12 @@
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
-    bool IsGatherSupported(const TensorInfo& input0,
-                           const TensorInfo& input1,
-                           const TensorInfo& output,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsGatherSupported(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            const GatherDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsGreaterSupported(const TensorInfo& input0,
-                            const TensorInfo& input1,
-                            const TensorInfo& ouput,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
@@ -272,12 +249,6 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                           const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsMinimumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
@@ -356,21 +327,11 @@
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-    bool IsResizeBilinearSupported(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsResizeSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsRsqrtSupported(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsShapeSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
@@ -395,11 +356,6 @@
                                  const SpaceToDepthDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
-    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-    bool IsSplitterSupported(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
     bool IsSplitterSupported(const TensorInfo& input,
                              const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                              const ViewsDescriptor& descriptor,
@@ -489,7 +445,7 @@
                                                             const armnn::BackendId& backend);
 
 /// Convenience function to check a capability on a backend
-ARMNN_DEPRECATED_MSG("This function has been deprecated in favour of GetBackendCapability")
+ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetBackendCapability", "22.05")
 bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability);
 
 }
diff --git a/include/armnn/Deprecated.hpp b/include/armnn/Deprecated.hpp
index 2b9240f..c493adb 100644
--- a/include/armnn/Deprecated.hpp
+++ b/include/armnn/Deprecated.hpp
@@ -34,13 +34,15 @@
 #   define ARMNN_NO_DEPRECATE_WARN_END
 #endif
 
-#define ARMNN_SUPRESS_DEPRECATE_WARNING(func) \
+#define ARMNN_SUPPRESS_DEPRECATE_WARNING(func) \
 ARMNN_NO_DEPRECATE_WARN_BEGIN \
 func; \
 ARMNN_NO_DEPRECATE_WARN_END
 
 #define ARMNN_DEPRECATED [[deprecated]]
 #define ARMNN_DEPRECATED_MSG(message) [[deprecated(message)]]
+#define ARMNN_DEPRECATED_MSG_REMOVAL_DATE(message, removed_in_release) \
+[[deprecated("Expected to be removed in release " #removed_in_release ". " message)]]
 
 #if defined(__GNUC__) && (__GNUC__ < 6)
 #   define ARMNN_DEPRECATED_ENUM
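
The new macro folds the expected removal release into the standard [[deprecated]]
attribute. A self-contained sketch of it in use; the struct and method names are
illustrative, not part of this patch:

```cpp
#include <armnn/Deprecated.hpp>

struct Example
{
    // Expands, via stringization of the release argument, to roughly:
    // [[deprecated("Expected to be removed in release \"22.05\". Use NewApi instead")]]
    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use NewApi instead", "22.05")
    void OldApi() {}

    void NewApi() {}
};

int main()
{
    Example e;
    e.OldApi(); // compilers emit a deprecation warning citing release "22.05"
    e.NewApi();
    return 0;
}
```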
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 9a5128a..b412bbd 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -243,14 +243,6 @@
     uint32_t**        m_ViewSizes;
 };
 
-template <typename TensorShapeIt>
-ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
-OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
-                                                         TensorShapeIt last,
-                                                         unsigned int concatenationDimension)
-{
-    return CreateDescriptorForConcatenation(first, last, concatenationDimension);
-}
 
 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
 /// concatenation of a number of input tensors.
@@ -402,7 +394,7 @@
     }
 
     /// Get the number of views/inputs.
-    ARMNN_DEPRECATED_MSG("Use GetNumInputs instead")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use GetNumInputs instead", "22.05")
     uint32_t GetNumViews() const;
 
     /// Get the number of views/inputs.
@@ -839,7 +831,10 @@
 };
 
 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
-struct ResizeBilinearDescriptor : BaseDescriptor
+struct ARMNN_DEPRECATED_MSG_REMOVAL_DATE(
+        "ResizeBilinearDescriptor is not supported anymore. Use ResizeDescriptor instead.",
+        "22.08")
+        ResizeBilinearDescriptor : BaseDescriptor
 {
     ResizeBilinearDescriptor()
         : m_TargetWidth(0)
@@ -849,6 +844,7 @@
         , m_HalfPixelCenters(false)
     {}
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     bool operator ==(const ResizeBilinearDescriptor& rhs) const
     {
         return m_TargetWidth          == rhs.m_TargetWidth &&
@@ -857,6 +853,7 @@
                m_AlignCorners         == rhs.m_AlignCorners &&
                m_HalfPixelCenters     == rhs.m_HalfPixelCenters;
     }
+    ARMNN_NO_DEPRECATE_WARN_END
 
     /// Target width value.
     uint32_t          m_TargetWidth;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index cceb545..a57db3c 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -13,21 +13,13 @@
 
 namespace armnn
 {
-class ILayerVisitor
+class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable IStrategy instead.", "22.05") ILayerVisitor
 {
 protected:
-    ARMNN_DEPRECATED_MSG("Use ABI stable IStrategy instead.")
     ILayerVisitor() {}
     virtual ~ILayerVisitor() {}
 
 public:
-    /// Function an absolute layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    virtual void VisitAbsLayer(const IConnectableLayer* layer,
-                               const char* name = nullptr) = 0;
 
     /// Function that an activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
@@ -93,13 +85,7 @@
     /// @param name - Optional name for the layer.
     virtual void VisitConcatLayer(const IConnectableLayer* layer,
                                   const OriginsDescriptor& concatDescriptor,
-                                  const char* name = nullptr)
-    {
-        // default implementation to ease transition while MergerLayer is being deprecated
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        VisitMergerLayer(layer, concatDescriptor, name);
-        ARMNN_NO_DEPRECATE_WARN_END
-    }
+                                  const char* name = nullptr) = 0;
 
     /// Function a layer with no inputs and a single output, which always corresponds to
     /// the passed in constant tensor should call back to when its Accept(ILayerVisitor&) function is invoked.
@@ -178,13 +164,6 @@
                                             const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                             const char* name = nullptr) = 0;
 
-    /// Function an Equal layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
-    virtual void VisitEqualLayer(const IConnectableLayer* layer,
-                                 const char* name = nullptr) = 0;
-
     /// Function a fill layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param fillDescriptor - Description of the layer
@@ -216,7 +195,7 @@
     /// @param weights - Tensor for the weights data.
     /// @param biases - Optional tensor for the bias data.
     /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitFullyConnectedLayer without ConstTensors")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use VisitFullyConnectedLayer without ConstTensors", "22.05")
     virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
                                           const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                           const ConstTensor& weights,
@@ -225,26 +204,12 @@
 
     /// Function a Gather layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitGatherLayer with descriptor instead")
-    virtual void VisitGatherLayer(const IConnectableLayer* layer,
-                                  const char* name = nullptr) = 0;
-
-    /// Function a Gather layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param gatherDescriptor - Parameters for the gather operation.
     /// @param name - Optional name for the layer.
     virtual void VisitGatherLayer(const IConnectableLayer* layer,
                                   const GatherDescriptor& gatherDescriptor,
                                   const char* name = nullptr) = 0;
 
-    /// Function a Greater layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
-    virtual void VisitGreaterLayer(const IConnectableLayer* layer,
-                                   const char* name = nullptr) = 0;
-
     /// Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified
@@ -318,18 +283,6 @@
     virtual void VisitMergeLayer(const IConnectableLayer* layer,
                                  const char* name = nullptr) = 0;
 
-    /// Function that a merger layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation
-    ///                           process. Number of Views must be equal to the number of inputs, and their order
-    ///                           must match - e.g. first view corresponds to the first input, second view to the
-    ///                           second input, etc....
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead")
-    virtual void VisitMergerLayer(const IConnectableLayer* layer,
-                                  const MergerDescriptor& mergerDescriptor,
-                                  const char* name = nullptr) = 0;
-
     /// Function a Minimum layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param name - Optional name for the layer.
@@ -437,15 +390,6 @@
                                    const ReshapeDescriptor& reshapeDescriptor,
                                    const char* name = nullptr) = 0;
 
-    /// Function that a resize bilinear layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param resizeDesc - Parameters for the resize operation.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
-    virtual void VisitResizeBilinearLayer(const IConnectableLayer* layer,
-                                          const ResizeBilinearDescriptor& resizeDesc,
-                                          const char* name = nullptr) = 0;
-
     /// Function that a resize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param resizeDescriptor - Parameters for the resize operation.
@@ -454,14 +398,6 @@
                                   const ResizeDescriptor& resizeDescriptor,
                                   const char* name = nullptr) = 0;
 
-    /// Function a Reciprocal of square root layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    virtual void VisitRsqrtLayer(const IConnectableLayer* layer,
-                                 const char* name = nullptr) = 0;
-
     /// Function that a slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 8ec8de0..a8e6cfc 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -88,8 +88,17 @@
     /// Returns the unique id of the layer
     virtual LayerGuid GetGuid() const = 0;
 
+    // The Accept function needs to be wrapped in a no-deprecate-warn macro to avoid deprecation
+    // warnings from the deprecated ILayerVisitor, which is used as its parameter.
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     /// Apply a visitor to this layer
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
+                                      "Accept function is deprecated. Use IStrategy in combination with "
+                                      "ExecuteStrategy instead, which is an ABI/API stable version of the "
+                                      "visitor pattern.",
+                                      "22.05")
     virtual void Accept(ILayerVisitor& visitor) const = 0;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     /// Apply a visitor to this layer
     virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
@@ -230,12 +239,12 @@
                                              const Optional<ConstTensor>& biases,
                                              const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const ConstTensor& biases,
@@ -271,19 +280,6 @@
         const Optional<ConstTensor>& biases,
         const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
-    IConnectableLayer* AddDepthwiseConvolution2dLayer(
-        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-        const ConstTensor& weights,
-        const char* name = nullptr);
-
-    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
-    IConnectableLayer* AddDepthwiseConvolution2dLayer(
-        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-        const ConstTensor& weights,
-        const ConstTensor& biases,
-        const char* name = nullptr);
-
     /// Adds a Dequantize layer to the network.
     /// @return - Interface for configuring the layer.
     IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
@@ -337,13 +333,13 @@
     IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                               const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddFullyConnectedLayer overload is deprecated", "22.05")
     IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                               const Optional<ConstTensor>& weights,
                                               const Optional<ConstTensor>& biases,
                                               const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddFullyConnectedLayer overload is deprecated", "22.05")
     IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                               const ConstTensor& weights,
                                               const Optional<ConstTensor>& biases,
@@ -414,23 +410,6 @@
     /// @return - Interface for configuring the layer.
     IConnectableLayer* AddMergeLayer(const char* name = nullptr);
 
-    /// Adds a concat layer to the network.
-    /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation
-    ///                           process. Number of Views must be equal to the number of inputs, and their order
-    ///                           must match - e.g. first view corresponds to the first input, second view to the
-    ///                           second input, etc....
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
-    IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
-        const char* name = nullptr);
-
-    /// Add absolute layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    IConnectableLayer* AddAbsLayer(const char* name = nullptr);
-
     /// Adds an addition layer to the network.
     /// @param name - Optional name for the layer.
     /// @return - Interface for configuring the layer.
@@ -460,14 +439,6 @@
     /// @return - Interface for configuring the layer.
     IConnectableLayer* AddRankLayer(const char* name = nullptr);
 
-    /// Adds a resize bilinear layer to the network.
-    /// @param resizeDesc - Parameters for the resize operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
-    IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
-                                              const char* name = nullptr);
-
     /// Adds a resize layer to the network.
     /// @param resizeDescriptor - Parameters for the resize operation.
     /// @param name - Optional name for the layer.
@@ -608,30 +579,6 @@
     /// @return - Interface for configuring the layer.
     IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
 
-    /// Add a Greater layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
-
-    /// Add a Equal layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    IConnectableLayer* AddEqualLayer(const char* name = nullptr);
-
-    /// Add Reciprocal of square root layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
-
-    /// Add Gather layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddGatherLayer with descriptor instead")
-    IConnectableLayer* AddGatherLayer(const char* name = nullptr);
-
     /// Add Gather layer to the network.
     /// @param descriptor - Description of the gather layer.
     /// @param name - Optional name for the layer.
@@ -722,7 +669,17 @@
     IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
                                               const char* name = nullptr);
 
+    // The Accept function needs to be wrapped in a no-deprecate-warn macro to avoid deprecation
+    // warnings from the deprecated ILayerVisitor, which is used as its parameter.
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    /// Apply a visitor to this network
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
+                                      "Accept function is deprecated. Use IStrategy in combination with "
+                                      "ExecuteStrategy instead, which is an ABI/API stable version of the "
+                                      "visitor pattern.",
+                                      "22.05")
     void Accept(ILayerVisitor& visitor) const;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const;
 
diff --git a/include/armnn/IRuntime.hpp b/include/armnn/IRuntime.hpp
index 908fe76..a46830c 100644
--- a/include/armnn/IRuntime.hpp
+++ b/include/armnn/IRuntime.hpp
@@ -31,7 +31,7 @@
 
 struct INetworkProperties
 {
-    ARMNN_DEPRECATED_MSG("Please use INetworkProperties constructor with MemorySource argument")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Please use INetworkProperties constructor with MemorySource argument", "22.02")
     INetworkProperties(bool importEnabled = false,
                        bool exportEnabled = false,
                        bool asyncEnabled = false,
@@ -45,7 +45,7 @@
           m_OutputSource(m_ExportEnabled ? MemorySource::Malloc : MemorySource::Undefined)
     {}
 
-    ARMNN_DEPRECATED_MSG("Please use INetworkProperties constructor without numThreads argument")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Please use INetworkProperties constructor without numThreads argument", "22.02")
     INetworkProperties(bool asyncEnabled,
                        MemorySource inputSource,
                        MemorySource outputSource,
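
Call sites migrate to the MemorySource-based constructor, which stays undeprecated.
A minimal sketch; the commented LoadNetwork call and variable names are illustrative:

```cpp
#include <armnn/IRuntime.hpp>

// Replaces the deprecated INetworkProperties(importEnabled, exportEnabled)
// and numThreads-based constructor forms.
armnn::INetworkProperties properties(/*asyncEnabled=*/false,
                                     armnn::MemorySource::Undefined,  // input source
                                     armnn::MemorySource::Undefined); // output source

// std::string errorMessage;
// runtime->LoadNetwork(networkId, std::move(optimizedNet), errorMessage, properties);
```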
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 6f1eb03..03b706f 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -214,14 +214,6 @@
                       char* reasonIfUnsupported = nullptr,
                       size_t reasonIfUnsupportedMaxLength = 1024);
 
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-bool IsMergerSupported(const BackendId& backend,
-                       const std::vector<const TensorInfo*> inputs,
-                       const TensorInfo& output,
-                       const OriginsDescriptor& descriptor,
-                       char* reasonIfUnsupported = nullptr,
-                       size_t reasonIfUnsupportedMaxLength = 1024);
 
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsMinimumSupported(const BackendId& backend,
@@ -318,14 +310,6 @@
                         size_t reasonIfUnsupportedMaxLength = 1024);
 
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
-ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-bool IsResizeBilinearSupported(const BackendId& backend,
-                               const TensorInfo& input,
-                               const TensorInfo& output,
-                               char* reasonIfUnsupported = nullptr,
-                               size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsResizeSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
@@ -364,13 +348,6 @@
                              char* reasonIfUnsupported = nullptr,
                              size_t reasonIfUnsupportedMaxLength = 1024);
 
-ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-bool IsSplitterSupported(const BackendId& backend,
-                         const TensorInfo& input,
-                         const ViewsDescriptor& descriptor,
-                         char* reasonIfUnsupported = nullptr,
-                         size_t reasonIfUnsupportedMaxLength = 1024);
-
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsSplitterSupported(const BackendId& backend,
                          const TensorInfo& input,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 43fc7b9..3d43725 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -30,8 +30,6 @@
     virtual ~LayerVisitorBase() {}
 
 public:
-    void VisitAbsLayer(const IConnectableLayer*,
-                       const char*) override { DefaultPolicy::Apply(__func__); }
 
     void VisitActivationLayer(const IConnectableLayer*,
                               const ActivationDescriptor&,
@@ -99,9 +97,6 @@
                                     const ElementwiseUnaryDescriptor&,
                                     const char*) override { DefaultPolicy::Apply(__func__); }
 
-    void VisitEqualLayer(const IConnectableLayer*,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
     void VisitFillLayer(const IConnectableLayer*,
                         const FillDescriptor&,
                         const char*) override { DefaultPolicy::Apply(__func__); }
@@ -119,17 +114,10 @@
                                   const Optional<ConstTensor>&,
                                   const char*) override { DefaultPolicy::Apply(__func__); }
 
-    ARMNN_DEPRECATED_MSG("Use VisitGatherLayer with descriptor instead")
-    void VisitGatherLayer(const IConnectableLayer*,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
     void VisitGatherLayer(const IConnectableLayer*,
                           const GatherDescriptor&,
                           const char*) override { DefaultPolicy::Apply(__func__); }
 
-    void VisitGreaterLayer(const IConnectableLayer*,
-                           const char*) override { DefaultPolicy::Apply(__func__); }
-
     void VisitInputLayer(const IConnectableLayer*,
                          LayerBindingId,
                          const char*) override { DefaultPolicy::Apply(__func__); }
@@ -165,10 +153,6 @@
     void VisitMergeLayer(const IConnectableLayer*,
                          const char*) override { DefaultPolicy::Apply(__func__); }
 
-    void VisitMergerLayer(const IConnectableLayer*,
-                          const MergerDescriptor&,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
     void VisitMinimumLayer(const IConnectableLayer*,
                            const char*) override { DefaultPolicy::Apply(__func__); }
 
@@ -221,17 +205,10 @@
                            const ReshapeDescriptor&,
                            const char*) override { DefaultPolicy::Apply(__func__); }
 
-    void VisitResizeBilinearLayer(const IConnectableLayer*,
-                                  const ResizeBilinearDescriptor&,
-                                  const char*) override { DefaultPolicy::Apply(__func__); }
-
     void VisitResizeLayer(const IConnectableLayer*,
                           const ResizeDescriptor&,
                           const char*) override { DefaultPolicy::Apply(__func__); }
 
-    void VisitRsqrtLayer(const IConnectableLayer*,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
     void VisitSliceLayer(const IConnectableLayer*,
                          const SliceDescriptor&,
                          const char*) override { DefaultPolicy::Apply(__func__); }
diff --git a/include/armnn/StrategyBase.hpp b/include/armnn/StrategyBase.hpp
new file mode 100644
index 0000000..78f393f
--- /dev/null
+++ b/include/armnn/StrategyBase.hpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+
+#include <armnn/INetwork.hpp>
+#include <armnn/IStrategy.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+namespace armnn
+{
+
+struct ThrowingStrategy
+{
+    void Apply(const std::string& errorMessage = "") { throw UnimplementedException(errorMessage); };
+};
+
+struct NoThrowStrategy
+{
+    void Apply(const std::string&) {};
+};
+
+/// Strategy base class with empty implementations.
+template <typename DefaultStrategy>
+class StrategyBase : public IStrategy
+{
+protected:
+    virtual ~StrategyBase() {};
+
+public:
+    virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                                 const armnn::BaseDescriptor& descriptor,
+                                 const std::vector<armnn::ConstTensor>& constants,
+                                 const char* name,
+                                 const armnn::LayerBindingId id = 0) override
+    {
+        armnn::IgnoreUnused(descriptor, constants, id, name);
+        switch (layer->GetType())
+        {
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
+    }
+
+protected:
+    DefaultStrategy m_DefaultStrategy;
+
+};
+
+
+} // namespace armnn
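
As a usage sketch, a strategy can derive from StrategyBase, handle only the layer
types it cares about, and let the chosen default policy absorb the rest. InputCounter
below is a hypothetical example, not part of this patch:

```cpp
#include <armnn/StrategyBase.hpp>

// Counts input layers; NoThrowStrategy silently ignores every other layer type.
// Substituting ThrowingStrategy would raise UnimplementedException instead.
class InputCounter : public armnn::StrategyBase<armnn::NoThrowStrategy>
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        if (layer->GetType() == armnn::LayerType::Input)
        {
            ++m_Count;
            return;
        }
        // Defer everything else to the base class, which applies the default policy.
        armnn::StrategyBase<armnn::NoThrowStrategy>::ExecuteStrategy(
            layer, descriptor, constants, name, id);
    }

    unsigned int m_Count = 0;
};
```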
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index ef52368..e713b89 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -40,14 +40,10 @@
     Signed32 = 3,
     Boolean  = 4,
     QSymmS16 = 5,
-    QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
-    QSymmS8  = 7,
-    QAsymmS8 = 8,
-    BFloat16 = 9,
-    Signed64 = 10,
-
-    QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
-    QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
+    QSymmS8  = 6,
+    QAsymmS8 = 7,
+    BFloat16 = 8,
+    Signed64 = 9,
 };
 
 enum class DataLayout
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index b644daa..9bd9c81 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -149,9 +149,6 @@
         case DataType::QAsymmU8:              return 1U;
         case DataType::QAsymmS8:              return 1U;
         case DataType::QSymmS8:               return 1U;
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        case DataType::QuantizedSymm8PerAxis: return 1U;
-        ARMNN_NO_DEPRECATE_WARN_END
         case DataType::QSymmS16:              return 2U;
         case DataType::Boolean:               return 1U;
         default:                              return 0U;
@@ -201,9 +198,6 @@
         case DataType::QAsymmU8:              return "QAsymmU8";
         case DataType::QAsymmS8:              return "QAsymmS8";
         case DataType::QSymmS8:               return "QSymmS8";
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        case DataType::QuantizedSymm8PerAxis: return "QSymm8PerAxis";
-        ARMNN_NO_DEPRECATE_WARN_END
         case DataType::QSymmS16:              return "QSymm16";
         case DataType::Signed32:              return "Signed32";
         case DataType::Boolean:               return "Boolean";
@@ -268,12 +262,9 @@
 
 constexpr bool IsQuantized8BitType(DataType dataType)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return dataType == DataType::QAsymmU8        ||
            dataType == DataType::QAsymmS8        ||
-           dataType == DataType::QSymmS8         ||
-           dataType == DataType::QuantizedSymm8PerAxis;
-    ARMNN_NO_DEPRECATE_WARN_END
+           dataType == DataType::QSymmS8;
 }
 
 constexpr bool IsQuantizedType(DataType dataType)
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 2045ba2..f4fe678 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -98,25 +98,6 @@
     using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
     using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>;
 
-    using GraphUniquePtr = std::unique_ptr<Graph>;
-    using SubgraphViewUniquePtr = std::unique_ptr<SubgraphView>;
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    using ISubGraphConverterPtr ARMNN_DEPRECATED_MSG("This type is no longer supported")
-        = std::unique_ptr<ISubGraphConverter>;
-    using SubGraphUniquePtr ARMNN_DEPRECATED_MSG("SubGraph is deprecated, use SubgraphView instead")
-        = std::unique_ptr<SubGraph>;
-
-    ARMNN_DEPRECATED_MSG("This method is no longer supported")
-    virtual ISubGraphConverterPtr CreateSubGraphConverter(const std::shared_ptr<SubGraph>& subGraph) const;
-
-    ARMNN_DEPRECATED_MSG("Use \"OptimizationViews OptimizeSubgraphView(const SubgraphView&)\" instead")
-    virtual Optimizations GetOptimizations() const;
-
-    ARMNN_DEPRECATED_MSG("Use \"OptimizationViews OptimizeSubgraphView(const SubgraphView&)\" instead")
-    virtual SubGraphUniquePtr OptimizeSubGraph(const SubGraph& subGraph, bool& optimizationAttempted) const;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     virtual IMemoryManagerUniquePtr CreateMemoryManager() const;
 
     virtual IWorkloadFactoryPtr CreateWorkloadFactory(
@@ -194,7 +175,7 @@
     };
 
     /// Returns true if backend support the capability false otherwise
-    ARMNN_DEPRECATED_MSG("This function has been deprecated in favour of GetCapability")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetCapability", "22.05")
     virtual bool HasCapability(BackendCapability /*capabilityClass*/) const { return false; }
 
     /// Signals the backend to use a custom memory allocator provided by the user
diff --git a/include/armnn/backends/ILayerSupport.hpp b/include/armnn/backends/ILayerSupport.hpp
index 3744f31..2fbb081 100644
--- a/include/armnn/backends/ILayerSupport.hpp
+++ b/include/armnn/backends/ILayerSupport.hpp
@@ -27,10 +27,6 @@
     virtual ~ILayerSupport() {}
 
 public:
-    virtual bool IsAbsSupported(const TensorInfo& input,
-                                const TensorInfo& output,
-                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
     virtual bool IsActivationSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ActivationDescriptor& descriptor,
@@ -162,11 +158,6 @@
                                              const ElementwiseUnaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
-    virtual bool IsEqualSupported(const TensorInfo& input0,
-                                  const TensorInfo& input1,
-                                  const TensorInfo& output,
-                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
     virtual bool IsFakeQuantizationSupported(const TensorInfo& input,
                                              const FakeQuantizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -190,19 +181,9 @@
     virtual bool IsGatherSupported(const TensorInfo& input0,
                                    const TensorInfo& input1,
                                    const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
-    virtual bool IsGatherSupported(const TensorInfo& input0,
-                                   const TensorInfo& input1,
-                                   const TensorInfo& output,
                                    const GatherDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
-    virtual bool IsGreaterSupported(const TensorInfo& input0,
-                                    const TensorInfo& input1,
-                                    const TensorInfo& ouput,
-                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
     virtual bool IsInputSupported(const TensorInfo& input,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
@@ -267,11 +248,6 @@
                                   const TensorInfo& output,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
-    virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                                   const TensorInfo& output,
-                                   const OriginsDescriptor& descriptor,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
     virtual bool IsMinimumSupported(const TensorInfo& input0,
                                     const TensorInfo& input1,
                                     const TensorInfo& ouput,
@@ -350,19 +326,11 @@
                                     const ReshapeDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
-    virtual bool IsResizeBilinearSupported(const TensorInfo& input,
-                                           const TensorInfo& output,
-                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
     virtual bool IsResizeSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const ResizeDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
-    virtual bool IsRsqrtSupported(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
     virtual bool IsShapeSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -388,10 +356,6 @@
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
     virtual bool IsSplitterSupported(const TensorInfo& input,
-                                     const ViewsDescriptor& descriptor,
-                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
-    virtual bool IsSplitterSupported(const TensorInfo& input,
                                      const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                      const ViewsDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index 0c88ccc..ae26346 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -72,9 +72,9 @@
 
 *ARMNN_INCLUDE* and *ARMNN_LIB* are mandatory and should point to Arm NN includes and libraries against which you will be generating the wrappers. *SWIG_EXECUTABLE* should only be set if you have multiple versions of SWIG installed or you used a custom location for your installation:
 ```bash
-$ export SWIG_EXECUTABLE=<path_to_swig>
-$ export ARMNN_INCLUDE=<path_to_armnn_include>
-$ export ARMNN_LIB=<path_to_armnn_libraries>
+$ export SWIG_EXECUTABLE=/full/path/to/swig/executable
+$ export ARMNN_INCLUDE=/full/path/to/armnn/include:/full/path/to/armnn/profiling/common/include
+$ export ARMNN_LIB=/path/to/libs
 ```
 
 ##### 2. Clean and build SWIG wrappers:
diff --git a/python/pyarmnn/src/pyarmnn/__init__.py b/python/pyarmnn/src/pyarmnn/__init__.py
index 5cb8bfb..13fdf95 100644
--- a/python/pyarmnn/src/pyarmnn/__init__.py
+++ b/python/pyarmnn/src/pyarmnn/__init__.py
@@ -67,6 +67,10 @@
 from ._generated.pyarmnn import DataType_Float16, DataType_Float32, DataType_QAsymmU8, DataType_Signed32, \
     DataType_Boolean, DataType_QSymmS16, DataType_QSymmS8, DataType_QAsymmS8
 from ._generated.pyarmnn import DataLayout_NCHW, DataLayout_NHWC
+from ._generated.pyarmnn import MemorySource_Malloc, MemorySource_Undefined, MemorySource_DmaBuf, \
+    MemorySource_DmaBufProtected
+from ._generated.pyarmnn import ProfilingDetailsMethod_Undefined, ProfilingDetailsMethod_DetailsWithEvents, \
+    ProfilingDetailsMethod_DetailsOnly
 
 from ._generated.pyarmnn import ActivationFunction_Abs, ActivationFunction_BoundedReLu, ActivationFunction_LeakyReLu, \
     ActivationFunction_Linear, ActivationFunction_ReLu, ActivationFunction_Sigmoid, ActivationFunction_SoftReLu, \
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i b/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i
index 073fada..bc8228a 100644
--- a/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_deserializer.i
@@ -6,7 +6,6 @@
 %{
 #include "armnnDeserializer/IDeserializer.hpp"
 #include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
 #include "armnn/INetwork.hpp"
 #include "armnn/Exceptions.hpp"
 #include <string>
diff --git a/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i b/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
index d2d79cc..3ed5d6b 100644
--- a/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
+++ b/python/pyarmnn/src/pyarmnn/swig/armnn_tfliteparser.i
@@ -6,7 +6,6 @@
 %{
 #include "armnnTfLiteParser/ITfLiteParser.hpp"
 #include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
 #include "armnn/INetwork.hpp"
 %}
 
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
index 00b835b..a050722 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_descriptors.i
@@ -5,7 +5,6 @@
 %{
 #include "armnn/Descriptors.hpp"
 #include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
 %}
 
 namespace std {
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index 7dc88ac..f4581ca 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -6,7 +6,6 @@
 #include "armnn/INetwork.hpp"
 #include "armnn/BackendId.hpp"
 #include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
 #include "armnn/Optional.hpp"
 #include <fstream>
 %}
@@ -989,7 +988,7 @@
                                                      const armnn::ConstTensor& weights,
                                                      armnn::ConstTensor* biases = nullptr,
                                                      const char* name = nullptr) {
-
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
         if (biases) {
             return $self->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
                                                  armnn::Optional<armnn::ConstTensor>(*biases), name);
@@ -997,7 +996,7 @@
             return $self->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
                                                  armnn::Optional<armnn::ConstTensor>(), name);
         }
-
+        ARMNN_NO_DEPRECATE_WARN_END
     }
 
     %feature("docstring",
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
index ec65cc0..e56464d 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_runtime.i
@@ -4,6 +4,7 @@
 //
 %{
 #include "armnn/IRuntime.hpp"
+#include "armnn/Deprecated.hpp"
 #include <iostream>
 #include <ostream>
 #include <sstream>
@@ -97,25 +98,43 @@
     ExternalProfilingOptions m_ProfilingOptions;
 };
 
+%{
+typedef armnn::INetworkProperties INetworkProperties;
+%}
+
 namespace armnn
 {
 
+%nodefaultctor INetworkProperties;
 struct INetworkProperties
 {
     %feature("docstring",
-    "
+             "
     Structure for holding network properties.
 
     Contains:
-        m_ImportEnabled (bool): Enable import.
-
-        m_ExportEnabled (bool): Enable export.
+        m_AsyncEnabled (bool): Enable asynchronous execution of multiple networks.
+        m_InputSource (MemorySource): When inputs are imported this defines the type of the imported memory.
+        m_OutputSource (MemorySource): When outputs are imported this defines the type of the imported memory.
+        m_ProfilingEnabled (bool): Enable profiling.
+        m_OutputNetworkDetailsMethod (ProfilingDetailsMethod): Customize profiling details.
 
     ") INetworkProperties;
-    INetworkProperties(bool importEnabled = false, bool exportEnabled = false);
+    INetworkProperties(bool asyncEnabled,
+                       MemorySource inputSource,
+                       MemorySource outputSource,
+                       bool profilingEnabled = false,
+                       ProfilingDetailsMethod detailsMethod = ProfilingDetailsMethod::Undefined);
 
-    const bool m_ImportEnabled;
-    const bool m_ExportEnabled;
+
+    const bool m_AsyncEnabled;
+
+    const bool m_ProfilingEnabled;
+
+    const ProfilingDetailsMethod m_OutputNetworkDetailsMethod;
+
+    const MemorySource m_InputSource;
+    const MemorySource m_OutputSource;
 };
 
 %feature("docstring",
@@ -293,5 +312,27 @@
 
 }
 
+%extend INetworkProperties {
+    %feature("docstring",
+             "
+    Structure for holding network properties.
+
+    Contains:
+        m_ImportEnabled (bool): Enable import.
+
+        m_ExportEnabled (bool): Enable export.
+
+    ") INetworkProperties;
+    INetworkProperties(bool importEnabled = false, bool exportEnabled = false) {
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
+        return new INetworkProperties(importEnabled, exportEnabled);
+        ARMNN_NO_DEPRECATE_WARN_END
+    }
+    %pythonprepend INetworkProperties(bool, bool) %{
+        import warnings
+        warnings.warn("Deprecated: Use constructor with MemorySource argument instead.", DeprecationWarning)
+    %}
+}
+
 }
 
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
index b838fce..83da455 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_types.i
@@ -4,12 +4,10 @@
 //
 %{
 #include "armnn/Types.hpp"
-#include "ProfilingGuid.hpp"
 %}
 
 %include <typemaps/permutation_vector.i>
 
-
 namespace armnn
 {
 
@@ -106,12 +104,10 @@
 
 ") GetSupportedBackends;
 
-%ignore ProfilingGuid;
 %ignore PermutationVector;
 #define ARMNN_DEPRECATED_ENUM  // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
 #define ARMNN_DEPRECATED_ENUM_MSG(message)  // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
 %include "armnn/Types.hpp"
-%include "ProfilingGuid.hpp"
 
 
 
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
index ff0ad40..295c870 100644
--- a/python/pyarmnn/test/test_runtime.py
+++ b/python/pyarmnn/test/test_runtime.py
@@ -3,6 +3,7 @@
 import os
 
 import pytest
+import warnings
 import numpy as np
 
 import pyarmnn as ann
@@ -156,6 +157,30 @@
     assert "" == messages
     assert net_id == 0
 
+def test_network_properties_constructor(random_runtime):
+    preferred_backends = random_runtime[0]
+    network = random_runtime[1]
+    runtime = random_runtime[2]
+
+    opt_network, _ = ann.Optimize(network, preferred_backends,
+                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
+
+    inputSource = ann.MemorySource_Undefined
+    outputSource = ann.MemorySource_Undefined
+    properties = ann.INetworkProperties(True, inputSource, outputSource)
+    assert properties.m_AsyncEnabled == True
+    assert properties.m_ProfilingEnabled == False
+    assert properties.m_OutputNetworkDetailsMethod == ann.ProfilingDetailsMethod_Undefined
+    assert properties.m_InputSource == ann.MemorySource_Undefined
+    assert properties.m_OutputSource == ann.MemorySource_Undefined
+
+    net_id, messages = runtime.LoadNetwork(opt_network, properties)
+    assert "" == messages
+    assert net_id == 0
+
+def test_network_properties_deprecated_constructor():
+    with pytest.warns(DeprecationWarning):
+        ann.INetworkProperties(True, True)
 
 def test_unload_network_fails_for_invalid_net_id(random_runtime):
     preferred_backends = random_runtime[0]
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 1616fd1..cc792a0 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -136,15 +136,6 @@
     return false;
 }
 
-
-bool LayerSupportHandle::IsAbsSupported(const TensorInfo& input,
-                                        const TensorInfo& output,
-                                        Optional<std::string&> reasonIfUnsupported)
-{
-    // Call the IsXXXLayerSupport function of the specific backend.
-    return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
-}
-
 bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const ActivationDescriptor& descriptor,
@@ -388,14 +379,6 @@
     return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
 }
 
-bool LayerSupportHandle::IsEqualSupported(const TensorInfo& input0,
-                                          const TensorInfo& input1,
-                                          const TensorInfo& output,
-                                          Optional<std::string&> reasonIfUnsupported)
-{
-    return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
-}
-
 bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
                                                      const FakeQuantizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported)
@@ -478,28 +461,12 @@
 bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
-                                           Optional<std::string&> reasonIfUnsupported)
-{
-    return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
-}
-
-bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
-                                           const TensorInfo& input1,
-                                           const TensorInfo& output,
                                            const GatherDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
 {
     return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
 }
 
-bool LayerSupportHandle::IsGreaterSupported(const TensorInfo& input0,
-                                            const TensorInfo& input1,
-                                            const TensorInfo& ouput,
-                                            Optional<std::string&> reasonIfUnsupported)
-{
-    return m_LayerSupport->IsGreaterSupported(input0, input1, ouput, reasonIfUnsupported.value());
-}
-
 bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
                                           Optional<std::string&> reasonIfUnsupported)
 {
@@ -613,14 +580,6 @@
     return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
 }
 
-bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                                           const TensorInfo& output,
-                                           const OriginsDescriptor& descriptor,
-                                           Optional<std::string&> reasonIfUnsupported)
-{
-    return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
-}
-
 bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
@@ -758,13 +717,6 @@
     return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
 }
 
-bool LayerSupportHandle::IsResizeBilinearSupported(const TensorInfo& input,
-                                                   const TensorInfo& output,
-                                                   Optional<std::string&> reasonIfUnsupported)
-{
-    return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
-}
-
 bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ResizeDescriptor& descriptor,
@@ -773,13 +725,6 @@
     return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
 }
 
-bool LayerSupportHandle::IsRsqrtSupported(const TensorInfo& input,
-                                          const TensorInfo& output,
-                                          Optional<std::string&> reasonIfUnsupported)
-{
-    return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
-}
-
 bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
@@ -820,13 +765,6 @@
 }
 
 bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
-                                             const ViewsDescriptor& descriptor,
-                                             Optional<std::string&> reasonIfUnsupported)
-{
-    return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
-}
-
-bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                              const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                              const ViewsDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
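
The removed LayerSupportHandle::IsAbsSupported maps onto the descriptor-based query that survives. A minimal sketch, using GetILayerSupportByBackendId from BackendHelper.hpp as the handle lookup; "CpuRef" and the function name are only illustrative.

    #include <armnn/BackendHelper.hpp>
    #include <armnn/Descriptors.hpp>

    #include <string>

    bool IsAbsSupportedOnRef(const armnn::TensorInfo& input,
                             const armnn::TensorInfo& output)
    {
        using namespace armnn;

        LayerSupportHandle handle = GetILayerSupportByBackendId(BackendId("CpuRef"));

        std::string reason;
        return handle.IsElementwiseUnarySupported(
            input, output,
            ElementwiseUnaryDescriptor(UnaryOperation::Abs),
            Optional<std::string&>(reason));
    }
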
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index 1a663d3..e24d5df 100644
--- a/src/armnn/CompatibleTypes.hpp
+++ b/src/armnn/CompatibleTypes.hpp
@@ -46,11 +46,8 @@
 template<>
 inline bool CompatibleTypes<int8_t>(DataType dataType)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     return dataType == DataType::QSymmS8
-        || dataType == DataType::QuantizedSymm8PerAxis
         || dataType == DataType::QAsymmS8;
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 template<>
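
To illustrate the narrowed specialization, a small sketch; CompatibleTypes.hpp is an in-tree header under src/armnn, so this only compiles inside the repository.

    #include "CompatibleTypes.hpp"

    void Int8CompatibilityExample()
    {
        using armnn::DataType;

        bool symm  = armnn::CompatibleTypes<int8_t>(DataType::QSymmS8);  // true
        bool asymm = armnn::CompatibleTypes<int8_t>(DataType::QAsymmS8); // true
        // QuantizedSymm8PerAxis is no longer an accepted match for int8_t.
        (void)symm; (void)asymm;
    }
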
diff --git a/src/armnn/ISubgraphViewConverter.hpp b/src/armnn/ISubgraphViewConverter.hpp
index 34789a2..2e108e1 100644
--- a/src/armnn/ISubgraphViewConverter.hpp
+++ b/src/armnn/ISubgraphViewConverter.hpp
@@ -25,9 +25,4 @@
     virtual std::vector<CompiledBlobPtr> CompileNetwork() = 0;
 };
 
-///
-/// Old ISubGraphConverter definition kept for backward compatibility only.
-///
-using ISubGraphConverter ARMNN_DEPRECATED_MSG("This type is no longer supported") = ISubgraphViewConverter;
-
 } // namespace armnn
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 9eaa97c..4cb7492 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -305,18 +305,6 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
 }
 
-ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
-bool IsGatherSupported(const BackendId& backend,
-                       const TensorInfo& input0,
-                       const TensorInfo& input1,
-                       const TensorInfo& output,
-                       char* reasonIfUnsupported,
-                       size_t reasonIfUnsupportedMaxLength)
-{
-    const GatherDescriptor descriptor{};
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output, descriptor);
-}
-
 bool IsGatherSupported(const BackendId& backend,
                        const TensorInfo& input0,
                        const TensorInfo& input1,
@@ -423,21 +411,6 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
 }
 
-ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-bool IsMergerSupported(const BackendId& backend,
-                       std::vector<const TensorInfo*> inputs,
-                       const TensorInfo& output,
-                       const OriginsDescriptor& descriptor,
-                       char* reasonIfUnsupported,
-                       size_t reasonIfUnsupportedMaxLength)
-{
-    ARMNN_ASSERT(inputs.size() > 0);
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
-    ARMNN_NO_DEPRECATE_WARN_END
-}
-
 bool IsMinimumSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
@@ -589,36 +562,6 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
 }
 
-ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-bool IsResizeBilinearSupported(const BackendId& backend,
-                               const TensorInfo& input,
-                               const TensorInfo& output,
-                               char* reasonIfUnsupported,
-                               size_t reasonIfUnsupportedMaxLength)
-{
-    ResizeDescriptor descriptor;
-    descriptor.m_Method = ResizeMethod::Bilinear;
-
-    const TensorShape& outputShape = output.GetShape();
-    descriptor.m_TargetWidth  = outputShape[3];
-    descriptor.m_TargetHeight = outputShape[2];
-
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
-}
-
-bool IsRsqrtSupported(const BackendId& backend,
-                      const TensorInfo& input,
-                      const TensorInfo& output,
-                      char* reasonIfUnsupported,
-                      size_t reasonIfUnsupportedMaxLength)
-{
-    FORWARD_LAYER_SUPPORT_FUNC(backend,
-                               IsElementwiseUnarySupported,
-                               input,
-                               output,
-                               ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt));
-}
-
 bool IsSoftmaxSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const TensorInfo& output,
@@ -649,18 +592,6 @@
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
 }
 
-ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-bool IsSplitterSupported(const BackendId& backend,
-                         const TensorInfo& input,
-                         const ViewsDescriptor& descriptor,
-                         char* reasonIfUnsupported,
-                         size_t reasonIfUnsupportedMaxLength)
-{
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
-    ARMNN_NO_DEPRECATE_WARN_END
-}
-
 bool IsSplitterSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
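
Callers that relied on the removed IsResizeBilinearSupported shim can rebuild it locally. A sketch that reuses the deleted code's NCHW assumption (target width at index 3, target height at index 2 of the output shape):

    #include <armnn/Descriptors.hpp>
    #include <armnn/LayerSupport.hpp>

    bool IsBilinearResizeSupported(const armnn::BackendId& backend,
                                   const armnn::TensorInfo& input,
                                   const armnn::TensorInfo& output)
    {
        armnn::ResizeDescriptor descriptor;
        descriptor.m_Method = armnn::ResizeMethod::Bilinear;

        // Same NCHW assumption as the removed shim.
        const armnn::TensorShape& outputShape = output.GetShape();
        descriptor.m_TargetWidth  = outputShape[3];
        descriptor.m_TargetHeight = outputShape[2];

        char reason[1024];
        return armnn::IsResizeSupported(backend, input, output, descriptor,
                                        reason, sizeof(reason));
    }
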
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4070802..a39b6b1 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -139,27 +139,6 @@
 }
 
 
-IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
-    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-    const ConstTensor& weights,
-    const char* name)
-{
-    Optional<ConstTensor> biases;
-    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
-}
-
-
-IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
-    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-    const ConstTensor& weights,
-    const ConstTensor& biases,
-    const char* name)
-{
-    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
-                                                        armnn::Optional<ConstTensor>(biases), name);
-}
-
-
 IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
 {
     return pNetworkImpl->AddDequantizeLayer(name);
@@ -264,17 +243,6 @@
     return pNetworkImpl->AddMergeLayer(name);
 }
 
-IConnectableLayer* INetwork::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
-                                            const char* name)
-{
-    return pNetworkImpl->AddConcatLayer(mergerDescriptor, name);
-}
-
-IConnectableLayer* INetwork::AddAbsLayer(const char* name)
-{
-    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
-}
-
 IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
 {
     return pNetworkImpl->AddAdditionLayer(name);
@@ -300,20 +268,6 @@
     return pNetworkImpl->AddRankLayer(name);
 }
 
-IConnectableLayer* INetwork::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
-                                                    const char* name)
-{
-    ResizeDescriptor resizeDescriptor;
-    resizeDescriptor.m_Method           = ResizeMethod::Bilinear;
-    resizeDescriptor.m_DataLayout       = descriptor.m_DataLayout;
-    resizeDescriptor.m_TargetWidth      = descriptor.m_TargetWidth;
-    resizeDescriptor.m_TargetHeight     = descriptor.m_TargetHeight;
-    resizeDescriptor.m_AlignCorners     = descriptor.m_AlignCorners;
-    resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
-
-    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
-}
-
 IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
                                             const char* name)
 {
@@ -426,27 +380,6 @@
     return pNetworkImpl->AddMinimumLayer(name);
 }
 
-IConnectableLayer* INetwork::AddGreaterLayer(const char* name)
-{
-    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
-}
-
-IConnectableLayer* INetwork::AddEqualLayer(const char* name)
-{
-    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
-}
-
-IConnectableLayer* INetwork::AddRsqrtLayer(const char* name)
-{
-    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
-}
-
-IConnectableLayer* INetwork::AddGatherLayer(const char* name)
-{
-    GatherDescriptor gatherDescriptor{};
-    return pNetworkImpl->AddGatherLayer(gatherDescriptor, name);
-}
-
 IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
                                             const char* name)
 {
@@ -527,10 +460,12 @@
     return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void INetwork::Accept(ILayerVisitor& visitor) const
 {
     return pNetworkImpl->Accept(visitor);
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void INetwork::ExecuteStrategy(IStrategy& strategy) const
 {
@@ -1774,23 +1709,6 @@
     Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
     Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
 
-    // Run backend specific optimizations (deprecated)
-    for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
-    {
-        auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
-        auto backendPtr = factoryFun();
-        ARMNN_ASSERT(backendPtr.get() != nullptr);
-
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        auto backendSpecificOptimizations = backendPtr->GetOptimizations();
-        ARMNN_NO_DEPRECATE_WARN_END
-
-        if (!backendSpecificOptimizations.empty())
-        {
-            Optimizer::Pass(optNetObjPtr->pOptimizedNetworkImpl->GetGraph(), backendSpecificOptimizations);
-        }
-    }
-
     return optNet;
 }
 bool NetworkImpl::GetShapeInferenceMethod()
@@ -1938,15 +1856,6 @@
     return layer;
 }
 
-IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                                       const ConstTensor& weights,
-                                                       const Optional<ConstTensor>& biases,
-                                                       const char* name)
-{
-    Optional<ConstTensor> optionalWeights(weights);
-    return AddFullyConnectedLayer(fullyConnectedDescriptor, optionalWeights, biases, name);
-}
-
 IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                            const char* name)
 {
@@ -2060,25 +1969,6 @@
     return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
 }
 
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
-    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-    const ConstTensor& weights,
-    const char* name)
-{
-    Optional<ConstTensor> biases;
-    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
-}
-
-IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
-    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-    const ConstTensor& weights,
-    const ConstTensor& biases,
-    const char* name)
-{
-    Optional<ConstTensor> optionalBiases(biases);
-    return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
-}
-
 IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                          const ConstTensor& anchors, const char* name)
 {
@@ -2147,17 +2037,6 @@
     return m_Graph->AddLayer<MinimumLayer>(name);
 }
 
-IConnectableLayer* NetworkImpl::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
-                                           const char* name)
-{
-    return AddConcatLayer(mergerDescriptor, name);
-}
-
-IConnectableLayer* NetworkImpl::AddAbsLayer(const char * name)
-{
-    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
-}
-
 IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
 {
     return m_Graph->AddLayer<AdditionLayer>(name);
@@ -2201,20 +2080,6 @@
     return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
 }
 
-IConnectableLayer* NetworkImpl::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
-                                                       const char* name)
-{
-    ResizeDescriptor resizeDescriptor;
-    resizeDescriptor.m_Method           = ResizeMethod::Bilinear;
-    resizeDescriptor.m_DataLayout       = descriptor.m_DataLayout;
-    resizeDescriptor.m_TargetWidth      = descriptor.m_TargetWidth;
-    resizeDescriptor.m_TargetHeight     = descriptor.m_TargetHeight;
-    resizeDescriptor.m_AlignCorners     = descriptor.m_AlignCorners;
-    resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
-
-    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
-}
-
 IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
 {
     return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
@@ -2452,27 +2317,6 @@
     return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
 }
 
-IConnectableLayer* NetworkImpl::AddGreaterLayer(const char* name)
-{
-    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
-}
-
-IConnectableLayer* NetworkImpl::AddEqualLayer(const char* name)
-{
-    return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
-}
-
-IConnectableLayer* NetworkImpl::AddRsqrtLayer(const char * name)
-{
-    return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
-}
-
-IConnectableLayer* NetworkImpl::AddGatherLayer(const char* name)
-{
-    GatherDescriptor gatherDescriptor{};
-    return AddGatherLayer(gatherDescriptor, name);
-}
-
 IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
                                            const char* name)
 {
@@ -2863,6 +2707,7 @@
     return layer;
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void NetworkImpl::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
@@ -2870,6 +2715,7 @@
         layer->Accept(visitor);
     };
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
 {
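
A sketch of the descriptor-based INetwork calls that replace the convenience overloads deleted above; the mappings are taken directly from the removed bodies.

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    void BuildWithCurrentApi()
    {
        using namespace armnn;
        INetworkPtr network = INetwork::Create();

        // AddAbsLayer / AddRsqrtLayer  ->  AddElementwiseUnaryLayer
        network->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs),   "abs");
        network->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), "rsqrt");

        // AddEqualLayer / AddGreaterLayer  ->  AddComparisonLayer
        network->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal),   "equal");
        network->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), "greater");

        // Descriptor-less AddGatherLayer  ->  overload taking a GatherDescriptor
        network->AddGatherLayer(GatherDescriptor(), "gather");
    }
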
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 11759c7..eb1d39d 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -41,9 +41,6 @@
 
     IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    IConnectableLayer* AddAbsLayer(const char* name = nullptr);
-
     IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                           const char* name = nullptr);
 
@@ -78,12 +75,12 @@
                                              const Optional<ConstTensor>& biases,
                                              const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const ConstTensor& biases,
@@ -105,19 +102,6 @@
         const Optional<ConstTensor>& biases,
         const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
-    IConnectableLayer* AddDepthwiseConvolution2dLayer(
-        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-        const ConstTensor& weights,
-        const char* name = nullptr);
-
-    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
-    IConnectableLayer* AddDepthwiseConvolution2dLayer(
-        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-        const ConstTensor& weights,
-        const ConstTensor& biases,
-        const char* name = nullptr);
-
     IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
 
     IConnectableLayer* AddDetectionPostProcessLayer(
@@ -130,9 +114,6 @@
     IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                 const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    IConnectableLayer* AddEqualLayer(const char* name = nullptr);
-
     IConnectableLayer* AddMergeLayer(const char* name = nullptr);
 
     IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
@@ -148,21 +129,9 @@
                                               const Optional<ConstTensor>& biases,
                                               const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
-    IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                              const ConstTensor& weights,
-                                              const Optional<ConstTensor>& biases,
-                                              const char* name = nullptr);
-
-    ARMNN_DEPRECATED_MSG("This AddGatherLayer overload is deprecated")
-    IConnectableLayer* AddGatherLayer(const char* name = nullptr);
-
     IConnectableLayer* AddGatherLayer(const GatherDescriptor& gatherDescriptor,
                                       const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
-
     IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                      const char* name = nullptr);
 
@@ -185,10 +154,6 @@
 
     IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
-    IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
-                                      const char* name = nullptr);
-
     IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
 
     IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
@@ -220,19 +185,12 @@
     IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
                                       const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
-    IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
-                                              const char* name = nullptr);
-
     IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
                                       const char* name = nullptr);
 
     IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                        const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
-
     IConnectableLayer* AddShapeLayer(const char* name = nullptr);
 
     IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
@@ -274,7 +232,9 @@
                                                           const LstmInputParams& params,
                                                           const char* name = nullptr);
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const;
 
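
With Accept wrapped for removal, graph inspection goes through ExecuteStrategy instead. A minimal sketch of an IStrategy implementation standing in for a read-only ILayerVisitor; the callback signature follows armnn::IStrategy as exercised by the ExecuteStrategy overrides in this patch.

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn/IStrategy.hpp>
    #include <armnn/Tensor.hpp>

    #include <iostream>
    #include <vector>

    // Stands in for what a read-only ILayerVisitor used to do.
    class LayerNamePrinter : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& descriptor,
                             const std::vector<armnn::ConstTensor>& constants,
                             const char* name,
                             const armnn::LayerBindingId id = 0) override
        {
            (void)layer; (void)descriptor; (void)constants; (void)id;
            std::cout << "visited layer: " << (name ? name : "<unnamed>") << "\n";
        }
    };

    void Walk(const armnn::INetwork& network)
    {
        LayerNamePrinter printer;
        network.ExecuteStrategy(printer); // replaces network.Accept(visitor)
    }
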
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index da2c39d..3fc93df 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -378,16 +378,6 @@
     fn("TargetShape",ss.str());
 }
 
-void StringifyLayerParameters<ResizeBilinearDescriptor>::Serialize(ParameterStringifyFunction& fn,
-                                                                   const ResizeBilinearDescriptor& desc)
-{
-    fn("TargetWidth", std::to_string(desc.m_TargetWidth));
-    fn("TargetHeight", std::to_string(desc.m_TargetHeight));
-    fn("DataLayout", GetDataLayoutName(desc.m_DataLayout));
-    fn("AlignCorners", std::to_string(desc.m_AlignCorners));
-    fn("HalfPixelCenters", std::to_string(desc.m_HalfPixelCenters));
-}
-
 void StringifyLayerParameters<ResizeDescriptor>::Serialize(ParameterStringifyFunction& fn,
                                                            const ResizeDescriptor& desc)
 {
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index 8a3630c..5c1e6f3 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -139,11 +139,6 @@
     static void Serialize(ParameterStringifyFunction& fn, const ReshapeDescriptor& desc);
 };
 
-template <> struct StringifyLayerParameters<ResizeBilinearDescriptor>
-{
-    static void Serialize(ParameterStringifyFunction& fn, const ResizeBilinearDescriptor& desc);
-};
-
 template <> struct StringifyLayerParameters<ResizeDescriptor>
 {
     static void Serialize(ParameterStringifyFunction& fn, const ResizeDescriptor& desc);
diff --git a/src/armnn/SubgraphView.hpp b/src/armnn/SubgraphView.hpp
index cb9e415..af60542 100644
--- a/src/armnn/SubgraphView.hpp
+++ b/src/armnn/SubgraphView.hpp
@@ -98,10 +98,4 @@
     /// The list of pointers to the layers of the parent graph.
     Layers m_Layers;
 };
-
-///
-/// Old SubGraph definition kept for backward compatibility only.
-///
-using SubGraph ARMNN_DEPRECATED_MSG("SubGraph is deprecated, use SubgraphView instead") = SubgraphView;
-
 } // namespace armnn
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 7aa4099..e103b7f 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -46,9 +46,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void AbsLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitAbsLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index 0e5ccb0..9ab6662 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -28,7 +28,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create an AbsLayer.
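
The guards are needed because these overrides must keep naming the now-deprecated ILayerVisitor type without tripping -Wdeprecated-declarations. As a sketch, on GCC/clang the macros conventionally expand to the usual diagnostic pragmas; include/armnn/Deprecated.hpp holds the authoritative, compiler-dependent definitions (the MSVC branch differs).

    #define ARMNN_NO_DEPRECATE_WARN_BEGIN \
        _Pragma("GCC diagnostic push") \
        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")

    #define ARMNN_NO_DEPRECATE_WARN_END \
        _Pragma("GCC diagnostic pop")
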
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 7bfa28e..3abb4c4 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -45,9 +45,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ActivationLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitActivationLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 5ffcc3e..47b7f66 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -26,7 +26,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 
 protected:
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index 8b1f2a8..b6db706 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -32,9 +32,11 @@
     return CloneBase<AdditionLayer>(graph, GetName());
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void AdditionLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitAdditionLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 4af576a..71a8553 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -23,7 +23,9 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     AdditionLayer* Clone(Graph& graph) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create an AdditionLayer.
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 219f346..5e469a4 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -86,9 +86,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index 761d4a0..f212536 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -34,7 +34,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ArgMinMaxLayer.
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index e3ee643..e52b986 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -70,6 +70,7 @@
     return {m_Mean, m_Variance, m_Beta, m_Gamma};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
 {
     ManagedConstTensorHandle managedMean(m_Mean);
@@ -85,6 +86,7 @@
     visitor.VisitBatchNormalizationLayer(
             this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
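
Tensors that Accept used to wrap for VisitBatchNormalizationLayer now reach an IStrategy through its constants argument. A sketch of consuming them; the ordering (mean, variance, beta, gamma) is an assumption mirroring the Accept body above.

    #include <armnn/Tensor.hpp>

    #include <vector>

    void InspectConstants(const std::vector<armnn::ConstTensor>& constants)
    {
        // For a batch-norm layer, expect four tensors in Accept's order:
        // mean, variance, beta, gamma (assumption, see above).
        for (const armnn::ConstTensor& tensor : constants)
        {
            const armnn::TensorInfo& info = tensor.GetInfo();
            (void)info; // e.g. info.GetShape(), info.GetDataType()
        }
    }
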
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 2777633..10ca7ec 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -39,7 +39,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 4b33b96..0b6eab5 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -95,9 +95,11 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void BatchToSpaceNdLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitBatchToSpaceNdLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index da7585b..bb6eb71 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -34,7 +34,9 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a BatchToSpaceNdLayer.
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index 16dd9a3..485bbf0 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -46,10 +46,12 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "CastLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void CastLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("CastLayer VisitCastLayer is not implemented");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/CastLayer.hpp b/src/armnn/layers/CastLayer.hpp
index 8a9ea43..e044813 100644
--- a/src/armnn/layers/CastLayer.hpp
+++ b/src/armnn/layers/CastLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a CastLayer.
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index a3b85f1..884f3ab 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -46,9 +46,12 @@
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
 }
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ChannelShuffleLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("ChannelShuffleLayer: VisitChannelShuffleLayer is not implemented");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 }
\ No newline at end of file
diff --git a/src/armnn/layers/ChannelShuffleLayer.hpp b/src/armnn/layers/ChannelShuffleLayer.hpp
index 399b651..903d161 100644
--- a/src/armnn/layers/ChannelShuffleLayer.hpp
+++ b/src/armnn/layers/ChannelShuffleLayer.hpp
@@ -11,7 +11,9 @@
 class ChannelShuffleLayer : public LayerWithParameters<ChannelShuffleDescriptor>
 {
 public:
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     /// Creates a dynamically-allocated copy of this layer.
     /// @param graph The graph into which this layer is being cloned
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 399834d..c644cb1 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -74,9 +74,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ComparisonLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitComparisonLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index bcb0dc2..07534af 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -35,7 +35,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ComparisonLayer
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 238fdb6..892c18e 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -318,9 +318,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ConcatLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitConcatLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 4315d66..fefedea 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -44,7 +44,9 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ConcatLayer.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index feeb762..e738e59 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -62,12 +62,14 @@
                outShape);
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ConstantLayer::Accept(ILayerVisitor& visitor) const
 {
     ManagedConstTensorHandle managedLayerOutput(m_LayerOutput);
     ConstTensor layerOutputTensor(managedLayerOutput.GetTensorInfo(), managedLayerOutput.Map());
     visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index ead8816..a9a9d37 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -39,7 +39,9 @@
     /// Free up the constant source data stored by the layer.
     void ReleaseConstantData() override {}
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 3577723..b7fa3a6 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -47,6 +47,7 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
 {
     // these conversion layers are only inserted by the
@@ -54,5 +55,6 @@
     IgnoreUnused(visitor);
     throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index d9df0bd..d2c0066 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ConvertBf16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 3b6f72c..77e6f66 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -47,6 +47,7 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
 {
     // these conversion layers are only inserted by the
@@ -54,5 +55,6 @@
     IgnoreUnused(visitor);
     throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index 4eadb9f..59faf64 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ConvertFp16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index f909769..6a003dc 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -48,6 +48,7 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
 {
     // these conversion layers are only inserted by the
@@ -55,5 +56,6 @@
     IgnoreUnused(visitor);
     throw armnn::Exception("ConvertFp32ToBf16Layer should never appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 57fbe13..8e33cb2 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ConvertFp32ToBf16Layer.
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 3e6f055..8c96909 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,6 +47,7 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
 {
     // These conversion layers are only inserted by the
@@ -54,5 +55,6 @@
     IgnoreUnused(visitor);
     throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index 5652a47..e331c7d 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -27,7 +27,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ConvertFp32ToFp16Layer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index b7bf046..ae29d83 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -143,6 +143,7 @@
     return {m_Weight, m_Bias};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
 {
     ManagedConstTensorHandle managedWeight(m_Weight);
@@ -158,6 +159,7 @@
 
     visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index a33cda2..8447478 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -42,7 +42,9 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index ade09ed..07d59be 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -52,11 +52,13 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void DebugLayer::Accept(ILayerVisitor& visitor) const
 {
     // by design debug layers are never in input graphs
     IgnoreUnused(visitor);
     throw armnn::Exception("DebugLayer should never appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index e71e05a..054f5e4 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a DebugLayer.
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index dfa575b..ba06ad6 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -75,9 +75,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitDepthToSpaceLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index 0730d4d..d9f6752 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -35,7 +35,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a DepthToSpaceLayer.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index ed52b39..86c9947 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -148,6 +148,7 @@
     return {m_Weight, m_Bias};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
 {
     ManagedConstTensorHandle managedWeight(m_Weight);
@@ -163,6 +164,7 @@
 
     visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index 51f6ea9..8f8f020 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -41,7 +41,9 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index cbe9ae1..f8a2e05 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -46,9 +46,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void DequantizeLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitDequantizeLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index a5750dd..99bde85 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a DequantizeLayer.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index bd94d1d..41c44d0 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -78,6 +78,7 @@
     return { m_Anchors };
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
 {
     ManagedConstTensorHandle managedAnchors(m_Anchors);
@@ -85,6 +86,7 @@
     visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName());
     m_Anchors->Unmap();
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index b409134..1826645 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -34,7 +34,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index 5b032ce..17b671a 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -32,9 +32,11 @@
     return CloneBase<DivisionLayer>(graph, GetName());
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void DivisionLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitDivisionLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 4427a4c..91bccfc 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -24,7 +24,9 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     DivisionLayer* Clone(Graph& graph) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a DivisionLayer.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index 8c94106..6f07cf9 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -61,9 +61,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index f6f8862..1261882 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -34,7 +34,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a ElementwiseUnaryLayer
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 102a672..69f0166 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -46,11 +46,13 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 78e49e6..c115c63 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 41471c3..45fe072 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -51,9 +51,11 @@
         inferredShapes[0][0]);
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void FillLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitFillLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index eeed141..096d9ba 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -27,7 +27,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a FillLayer.
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index e03bdb1..a975ee8 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -45,9 +45,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void FloorLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitFloorLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 07cf151..2b16cfa 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a FloorLayer.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 259d414..2c41d74 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -80,10 +80,12 @@
     return {m_Weight, m_Bias};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitFullyConnectedLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index 5639bf2..e97282d 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -43,7 +43,9 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index cdbdaab..e8b67b8 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -83,9 +83,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void GatherLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitGatherLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index 3bc8c69..8c29407 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -34,7 +34,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a GatherLayer.
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index 0f96611..21246f1 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -35,9 +35,11 @@
                                                "InputLayer should already have the TensorInfo set.");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void InputLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitInputLayer(this, this->GetBindingId(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index ff6b521..2b73dce 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create an InputLayer.
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 87c6877..657b442 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -46,9 +46,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitInstanceNormalizationLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index 799cf28..addd61e 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create an InstanceNormalizationLayer.
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index c96e708..7bddbf1 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -46,9 +46,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitL2NormalizationLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 5d58077..21072b2 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create an L2NormalizationLayer.
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 24e79ce..ea25182 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -45,9 +45,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitLogSoftmaxLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index b21bece..9963f85 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -29,7 +29,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a LogSoftmaxLayer.
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index 0ae5ea5..3940b85 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -72,9 +72,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/LogicalBinaryLayer.hpp b/src/armnn/layers/LogicalBinaryLayer.hpp
index c6b024b..caeaa0a 100644
--- a/src/armnn/layers/LogicalBinaryLayer.hpp
+++ b/src/armnn/layers/LogicalBinaryLayer.hpp
@@ -35,7 +35,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a LogicalBinaryLayer
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 0fea668..a18fdb0 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -300,6 +300,7 @@
             m_LayerNormParameters.m_OutputLayerNormWeights};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void LstmLayer::Accept(ILayerVisitor& visitor) const
 {
     LstmInputParams inputParams;
@@ -509,6 +510,7 @@
 
     visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void LstmLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index dc6d12a..fbcc03d 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -44,7 +44,9 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index 608a71e..6defdab 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -41,10 +41,12 @@
     ARMNN_ASSERT(GetNumOutputSlots() == 0);
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MapLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("MapLayer should not appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MapLayer.hpp b/src/armnn/layers/MapLayer.hpp
index 620caf7..d82c44a 100644
--- a/src/armnn/layers/MapLayer.hpp
+++ b/src/armnn/layers/MapLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a MapLayer.
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index d57e9e6..95faeea 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -31,9 +31,11 @@
     return CloneBase<MaximumLayer>(graph, GetName());
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MaximumLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitMaximumLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index 743f79b..f032b88 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -24,7 +24,9 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     MaximumLayer* Clone(Graph& graph) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a MaximumLayer.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index 9d4265c..b704e2a 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -103,9 +103,11 @@
     ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MeanLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitMeanLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 3a094bf..94b0cbe 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -29,7 +29,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
 protected:
     /// Constructor to create a MeanLayer.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 40c1b98..61fa462 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -49,11 +49,13 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MemCopyLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("MemCopyLayer should not appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index b913c52..3c6fd0d 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index c96f92b..689678e 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -49,11 +49,13 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MemImportLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("MemImportLayer should not appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 4737970..7787701 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -28,7 +28,9 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index 74a31a8..2bd29f2 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -58,9 +58,11 @@
     return {inputShapes[0]};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MergeLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitMergeLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index 07f6900..d7cfcf3 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -33,7 +33,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a MergeLayer.
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index f60815e..38ab442 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -32,9 +32,11 @@
     return CloneBase<MinimumLayer>(graph, GetName());
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MinimumLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitMinimumLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 2db0629..634591e 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -24,7 +24,10 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     MinimumLayer* Clone(Graph& graph) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a MinimumLayer.
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 8fc13ac..4ff188c 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -32,9 +32,11 @@
     return CloneBase<MultiplicationLayer>(graph, GetName());
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void MultiplicationLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitMultiplicationLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 692f407..8acf4f6 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -24,7 +24,10 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     MultiplicationLayer* Clone(Graph& graph) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a MultiplicationLayer.
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index 4bf97ed..bd38fa4 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -46,9 +46,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void NormalizationLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitNormalizationLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index 00a4435..e36e886 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -28,7 +28,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a NormalizationLayer.
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index d14337f..579aede 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -37,9 +37,11 @@
                                                "OutputLayer: Input slot must be connected.");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void OutputLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitOutputLayer(this, GetBindingId(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 408a28a..d2bdf19 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -40,7 +40,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create an OutputLayer.
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 2c53f20..78af9d3 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -71,9 +71,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void PadLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitPadLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index 5664997..9a31ae5 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -35,7 +35,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape> &inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a PadLayer.
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 859e687..1c563ad 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -57,9 +57,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void PermuteLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitPermuteLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index 67be2e1..db256b3 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -60,7 +60,10 @@
                GetPermutation().IsEqual(PolymorphicDowncast<const PermuteLayer*>(&other)->GetPermutation());
     }
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a PermuteLayer.
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 0deafaa..d22bce2 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -117,9 +117,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitPooling2dLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 90c9a44..677c10b 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -34,7 +34,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a Pooling2dLayer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 67c1db4..14dffe5 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -49,11 +49,13 @@
     m_PreCompiledObject = std::move(preCompiledObject);
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index 0db1472..e2c5e80 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -33,7 +33,10 @@
 
     void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 18d81ae..9fb9f07 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -116,9 +116,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void PreluLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitPreluLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index 511be29..eecffbc 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -35,7 +35,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a PreluLayer.
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 0294afd..493e3fe 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -302,6 +302,7 @@
             m_LayerNormParameters.m_OutputLayerNormWeights};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void QLstmLayer::Accept(ILayerVisitor& visitor) const
 {
     LstmInputParams inputParams;
@@ -531,6 +532,7 @@
 
     visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 
 void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 38a0464..12774a9 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -107,7 +107,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 6ce28c41..e37d6f5 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -45,9 +45,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void QuantizeLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitQuantizeLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index 2f331a4..d8898ba 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -23,7 +23,10 @@
 
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     QuantizeLayer(const char* name);
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index be50f48..8164219 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -169,6 +169,7 @@
     };
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
 {
     QuantizedLstmInputParams inputParams;
@@ -305,6 +306,7 @@
 
     visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 25cc7b7..fe7d423 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -69,7 +69,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 3b14ef0..a1e06ef 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -41,10 +41,13 @@
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
     ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
 }
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void RankLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitRankLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void RankLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index fbd2824..416e1b0 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -22,7 +22,10 @@
 
         void ValidateTensorShapesFromInputs() override;
 
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
         void Accept(ILayerVisitor& visitor) const override;
+        ARMNN_NO_DEPRECATE_WARN_END
+
 
         void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index 31a2dfa..07651fc 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -102,9 +102,11 @@
     ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ReduceLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitReduceLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp
index fd4f207..a6ac44e 100644
--- a/src/armnn/layers/ReduceLayer.hpp
+++ b/src/armnn/layers/ReduceLayer.hpp
@@ -27,7 +27,10 @@
     /// will lead to a valid configuration of @ref ReduceLayer.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a ReduceLayer.
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index f303ff7..1b9e691 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -53,9 +53,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ReshapeLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitReshapeLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index 78335e6..d107b5c 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -45,7 +45,10 @@
                m_Param.m_TargetShape == PolymorphicDowncast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape;
     }
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a ReshapeLayer.
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 3a390d4..c190f49 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -75,9 +75,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ResizeLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitResizeLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index 3462585..fab18c7 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -34,7 +34,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a ResizeLayer.
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 9c09701..a0572da 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -47,9 +47,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void RsqrtLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitRsqrtLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index 4fcbf72..a31aea6 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -28,7 +28,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create an RsqrtLayer.
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index 4193fa9..6a55a2d 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -59,11 +59,13 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void ShapeLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("ShapeLayer VisitShapeLayer is not implemented");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void ShapeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/ShapeLayer.hpp b/src/armnn/layers/ShapeLayer.hpp
index fee285c..35ef873 100644
--- a/src/armnn/layers/ShapeLayer.hpp
+++ b/src/armnn/layers/ShapeLayer.hpp
@@ -34,7 +34,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index b512ca4..e7d8f1e 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -59,9 +59,11 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void SliceLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitSliceLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index 0505a05..dda66a1 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -34,7 +34,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a SliceLayer.
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 9882da4..eab5b85 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -46,9 +46,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitSoftmaxLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index cbdd7c5..035e7bc 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -28,7 +28,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a SoftmaxLayer.
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index b9e3331..3f58b3f 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -83,9 +83,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitSpaceToBatchNdLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 28857d8..70972bd 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -35,7 +35,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a SpaceToBatchNdLayer.
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 90ba8fc..1a3112c 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -77,9 +77,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitSpaceToDepthLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index a8bc108..267ac3b 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -35,7 +35,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a SpaceToDepthLayer.
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 5e6622e..c1e191c 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -241,9 +241,11 @@
     }
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void SplitterLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitSplitterLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index f90696b..1fc37ef 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -43,7 +43,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a SplitterLayer.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 11935a1..fe2d123 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -95,9 +95,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void StackLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitStackLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 3d05da0..8d38907 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -34,7 +34,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a StackLayer.
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index 6281f3e..ccf1529 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -41,8 +41,10 @@
     // so do nothing here.
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void StandInLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitStandInLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 } // namespace armnn
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index 2864753..bb50006 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -37,7 +37,10 @@
 
     /// Accepts a visitor object and calls the VisitStandInLayer() method.
     /// @param visitor The visitor on which to call the VisitStandInLayer() method.
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a StandInLayer.
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index c8f3635..aa7012c 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -111,9 +111,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitStridedSliceLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index 35ac370..7e17cb2 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -34,7 +34,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a StridedSliceLayer.
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 34087bd..bed7085 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -32,9 +32,11 @@
     return CloneBase<SubtractionLayer>(graph, GetName());
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void SubtractionLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitSubtractionLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 527b50b..8c31479 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -24,7 +24,10 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     SubtractionLayer* Clone(Graph& graph) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a SubtractionLayer.
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 8792639..258a7ff 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -52,9 +52,11 @@
             GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void SwitchLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitSwitchLayer(this, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index 025f379..a36261b 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -28,7 +28,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a SwitchLayer.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index c774dd0..acdbebe 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -121,6 +121,7 @@
     return {m_Weight, m_Bias};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
 {
     ManagedConstTensorHandle managedWeight(m_Weight);
@@ -136,6 +137,7 @@
 
     visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 1b17dac..b6db41c 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -40,7 +40,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 8951fe4..ffd8693 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -57,9 +57,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void TransposeLayer::Accept(ILayerVisitor& visitor) const
 {
     visitor.VisitTransposeLayer(this, GetParameters(), GetName());
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index a424524..8449db4 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -58,7 +58,10 @@
                GetPermutation().IsEqual(PolymorphicDowncast<const TransposeLayer*>(&other)->GetPermutation());
     }
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create a TransposeLayer.
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index 4541706..a3671a0 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -307,11 +307,13 @@
             m_LayerNormParameters.m_OutputLayerNormWeights};
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void UnidirectionalSequenceLstmLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("UnidirectionalSequenceLstmLayer: VisitUnidirectionalSequenceLstmLayer is not implemented");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 void UnidirectionalSequenceLstmLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
index fb59f01..857d277 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
@@ -44,7 +44,10 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index 4a43f9f..fa5dd9e 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -41,10 +41,12 @@
     ARMNN_ASSERT(GetNumOutputSlots() == 0);
 }
 
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 void UnmapLayer::Accept(ILayerVisitor& visitor) const
 {
     IgnoreUnused(visitor);
     throw armnn::Exception("UnmapLayer should not appear in an input graph");
 }
+ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/UnmapLayer.hpp b/src/armnn/layers/UnmapLayer.hpp
index 12d4342..3d1d115 100644
--- a/src/armnn/layers/UnmapLayer.hpp
+++ b/src/armnn/layers/UnmapLayer.hpp
@@ -28,7 +28,10 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     void Accept(ILayerVisitor& visitor) const override;
+    ARMNN_NO_DEPRECATE_WARN_END
+
 
 protected:
     /// Constructor to create an UnmapLayer.
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index d3d8698..e21e777 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -58,73 +58,6 @@
     CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
 }
 
-void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
-                                                const ConstTensor* expected,
-                                                const ConstTensor* actual)
-{
-    if (expected == nullptr)
-    {
-        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
-    }
-    else
-    {
-        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
-        if (actual != nullptr)
-        {
-            CheckConstTensors(*expected, *actual);
-        }
-    }
-}
-
-void TestLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
-{
-    CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
-    CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
-    CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
-    CheckConstTensorPtrs("InputToInputWeights",
-        m_InputParams.m_InputToInputWeights, inputParams.m_InputToInputWeights);
-    CheckConstTensorPtrs("InputToForgetWeights",
-        m_InputParams.m_InputToForgetWeights, inputParams.m_InputToForgetWeights);
-    CheckConstTensorPtrs("InputToCellWeights", m_InputParams.m_InputToCellWeights, inputParams.m_InputToCellWeights);
-    CheckConstTensorPtrs(
-        "InputToOutputWeights", m_InputParams.m_InputToOutputWeights, inputParams.m_InputToOutputWeights);
-    CheckConstTensorPtrs(
-        "RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, inputParams.m_RecurrentToInputWeights);
-    CheckConstTensorPtrs(
-        "RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, inputParams.m_RecurrentToForgetWeights);
-    CheckConstTensorPtrs(
-        "RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, inputParams.m_RecurrentToCellWeights);
-    CheckConstTensorPtrs(
-        "RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, inputParams.m_RecurrentToOutputWeights);
-    CheckConstTensorPtrs(
-        "CellToInputWeights", m_InputParams.m_CellToInputWeights, inputParams.m_CellToInputWeights);
-    CheckConstTensorPtrs(
-        "CellToForgetWeights", m_InputParams.m_CellToForgetWeights, inputParams.m_CellToForgetWeights);
-    CheckConstTensorPtrs(
-        "CellToOutputWeights", m_InputParams.m_CellToOutputWeights, inputParams.m_CellToOutputWeights);
-    CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
-    CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
-    CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
-}
-
-void TestQLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
-                                                 const ConstTensor* expected,
-                                                 const ConstTensor* actual)
-{
-    if (expected == nullptr)
-    {
-        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
-    }
-    else
-    {
-        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
-        if (actual != nullptr)
-        {
-            CheckConstTensors(*expected, *actual);
-        }
-    }
-}
-
 void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
 {
     CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
@@ -134,95 +67,6 @@
     CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
 }
 
-void TestQLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
-{
-    CheckConstTensorPtrs("InputToInputWeights",
-                         m_InputParams.m_InputToInputWeights,
-                         inputParams.m_InputToInputWeights);
-
-    CheckConstTensorPtrs("InputToForgetWeights",
-                         m_InputParams.m_InputToForgetWeights,
-                         inputParams.m_InputToForgetWeights);
-
-    CheckConstTensorPtrs("InputToCellWeights",
-                         m_InputParams.m_InputToCellWeights,
-                         inputParams.m_InputToCellWeights);
-
-    CheckConstTensorPtrs("InputToOutputWeights",
-                         m_InputParams.m_InputToOutputWeights,
-                         inputParams.m_InputToOutputWeights);
-
-    CheckConstTensorPtrs("RecurrentToInputWeights",
-                         m_InputParams.m_RecurrentToInputWeights,
-                         inputParams.m_RecurrentToInputWeights);
-
-    CheckConstTensorPtrs("RecurrentToForgetWeights",
-                         m_InputParams.m_RecurrentToForgetWeights,
-                         inputParams.m_RecurrentToForgetWeights);
-
-    CheckConstTensorPtrs("RecurrentToCellWeights",
-                         m_InputParams.m_RecurrentToCellWeights,
-                         inputParams.m_RecurrentToCellWeights);
-
-    CheckConstTensorPtrs("RecurrentToOutputWeights",
-                         m_InputParams.m_RecurrentToOutputWeights,
-                         inputParams.m_RecurrentToOutputWeights);
-
-    CheckConstTensorPtrs("CellToInputWeights",
-                         m_InputParams.m_CellToInputWeights,
-                         inputParams.m_CellToInputWeights);
-
-    CheckConstTensorPtrs("CellToForgetWeights",
-                         m_InputParams.m_CellToForgetWeights,
-                         inputParams.m_CellToForgetWeights);
-
-    CheckConstTensorPtrs("CellToOutputWeights",
-                         m_InputParams.m_CellToOutputWeights,
-                         inputParams.m_CellToOutputWeights);
-
-    CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
-    CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
-
-    CheckConstTensorPtrs("InputGateBias",  m_InputParams.m_InputGateBias,  inputParams.m_InputGateBias);
-    CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
-    CheckConstTensorPtrs("CellBias",       m_InputParams.m_CellBias,       inputParams.m_CellBias);
-    CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
-
-    CheckConstTensorPtrs("InputLayerNormWeights",
-                         m_InputParams.m_InputLayerNormWeights,
-                         inputParams.m_InputLayerNormWeights);
-
-    CheckConstTensorPtrs("ForgetLayerNormWeights",
-                         m_InputParams.m_ForgetLayerNormWeights,
-                         inputParams.m_ForgetLayerNormWeights);
-
-    CheckConstTensorPtrs("CellLayerNormWeights",
-                         m_InputParams.m_CellLayerNormWeights,
-                         inputParams.m_CellLayerNormWeights);
-
-    CheckConstTensorPtrs("OutputLayerNormWeights",
-                         m_InputParams.m_OutputLayerNormWeights,
-                         inputParams.m_OutputLayerNormWeights);
-}
-
-void TestQuantizedLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
-                                                         const ConstTensor* expected,
-                                                         const ConstTensor* actual)
-{
-    if (expected == nullptr)
-    {
-        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
-    }
-    else
-    {
-        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
-        if (actual != nullptr)
-        {
-            CheckConstTensors(*expected, *actual);
-        }
-    }
-}
-
 void TestQuantizedLstmLayerVisitor::CheckInputParameters(const QuantizedLstmInputParams& inputParams)
 {
     CheckConstTensorPtrs("InputToInputWeights",
@@ -285,7 +129,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedConvolution2dLayer")
@@ -309,7 +153,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckConvolution2dLayerWithBiases")
@@ -338,7 +182,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
@@ -368,7 +212,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckDepthwiseConvolution2dLayer")
@@ -391,7 +235,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
@@ -418,7 +262,7 @@
                                                                         weights,
                                                                         EmptyOptional(),
                                                                         layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
@@ -447,7 +291,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
@@ -477,7 +321,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckFullyConnectedLayer")
@@ -500,8 +344,8 @@
     IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
     weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
 
-    weightsLayer->Accept(weightsVisitor);
-    layer->Accept(visitor);
+    weightsLayer->ExecuteStrategy(weightsVisitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedFullyConnectedLayer")
@@ -525,8 +369,8 @@
     IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
     weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
 
-    weightsLayer->Accept(weightsVisitor);
-    layer->Accept(visitor);
+    weightsLayer->ExecuteStrategy(weightsVisitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckFullyConnectedLayerWithBiases")
@@ -556,9 +400,9 @@
     weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
     biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
 
-    weightsLayer->Accept(weightsVisitor);
-    biasesLayer->Accept(biasesVisitor);
-    layer->Accept(visitor);
+    weightsLayer->ExecuteStrategy(weightsVisitor);
+    biasesLayer->ExecuteStrategy(biasesVisitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
@@ -589,9 +433,9 @@
     weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
     biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
 
-    weightsLayer->Accept(weightsVisitor);
-    biasesLayer->Accept(biasesVisitor);
-    layer->Accept(visitor);
+    weightsLayer->ExecuteStrategy(weightsVisitor);
+    biasesLayer->ExecuteStrategy(biasesVisitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckBatchNormalizationLayer")
@@ -621,7 +465,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedBatchNormalizationLayer")
@@ -653,7 +497,7 @@
 
     IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
             descriptor, mean, variance, beta, gamma, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckConstLayer")
@@ -667,7 +511,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConstantLayer(input);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedConstLayer")
@@ -682,7 +526,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckLstmLayerBasic")
@@ -754,7 +598,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedLstmLayerBasic")
@@ -827,7 +671,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckLstmLayerCifgDisabled")
@@ -918,7 +762,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedLstmLayerCifgDisabled")
@@ -1010,7 +854,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 // TODO add one with peephole
@@ -1097,7 +941,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
@@ -1211,7 +1055,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedLstmLayerPeephole")
@@ -1298,7 +1142,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 // TODO add one with projection
@@ -1385,7 +1229,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedLstmLayerProjection")
@@ -1472,7 +1316,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckQLstmLayerBasic")
@@ -1544,7 +1388,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedQLstmLayerBasic")
@@ -1617,7 +1461,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckQLstmLayerCifgDisabled")
@@ -1712,7 +1556,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
@@ -1829,7 +1673,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
@@ -1919,7 +1763,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckQLstmLayerProjectionEnabled")
@@ -2009,7 +1853,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
@@ -2132,7 +1976,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 
@@ -2222,7 +2066,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckNamedQuantizedLstmLayer")
@@ -2312,7 +2156,7 @@
     NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 }
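
The test updates above are mechanical: every per-layer Visit...Layer override
becomes a single ExecuteStrategy override, and every layer->Accept(visitor)
call becomes layer->ExecuteStrategy(visitor). A minimal sketch of the new
pattern, assuming only the IStrategy interface visible in this patch
(CountingStrategy is a hypothetical name, not part of the patch):

#include <armnn/INetwork.hpp>   // IConnectableLayer
#include <armnn/IStrategy.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <vector>

class CountingStrategy : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(descriptor, constants, name, id);
        // A single entry point for all layers: dispatch on the layer type
        // instead of relying on per-layer virtual Visit functions.
        if (layer->GetType() == armnn::LayerType::Convolution2d)
        {
            ++m_NumConvolutions;
        }
    }

    unsigned int m_NumConvolutions = 0;
};

// Usage mirrors the tests above:
//     CountingStrategy strategy;
//     layer->ExecuteStrategy(strategy);
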
diff --git a/src/armnn/test/ConstTensorLayerVisitor.hpp b/src/armnn/test/ConstTensorLayerVisitor.hpp
index 35e2e87..5538852 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.hpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.hpp
@@ -5,9 +5,14 @@
 #pragma once
 
 #include "TestLayerVisitor.hpp"
+#include "LayersFwd.hpp"
 #include <armnn/Descriptors.hpp>
 #include <armnn/LstmParams.hpp>
 #include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
+
+#include <doctest/doctest.h>
 
 namespace armnn
 {
@@ -27,17 +32,33 @@
 
     virtual ~TestConvolution2dLayerVisitor() {}
 
-    void VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                 const Convolution2dDescriptor& convolution2dDescriptor,
-                                 const ConstTensor& weights,
-                                 const Optional<ConstTensor>& biases,
-                                 const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckDescriptor(convolution2dDescriptor);
-        CheckConstTensors(m_Weights, weights);
-        CheckOptionalConstTensors(m_Biases, biases);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Convolution2d:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckDescriptor(static_cast<const armnn::Convolution2dDescriptor&>(descriptor));
+                CheckConstTensors(m_Weights, constants[0]);
+                if (m_Biases.has_value())
+                {
+                    CHECK(constants.size() == 2);
+                    CheckConstTensors(m_Biases.value(), constants[1]);
+                }
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 protected:
@@ -64,17 +85,33 @@
 
     virtual ~TestDepthwiseConvolution2dLayerVisitor() {}
 
-    void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                          const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-                                          const ConstTensor& weights,
-                                          const Optional<ConstTensor>& biases,
-                                          const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckDescriptor(convolution2dDescriptor);
-        CheckConstTensors(m_Weights, weights);
-        CheckOptionalConstTensors(m_Biases, biases);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::DepthwiseConvolution2d:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckDescriptor(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
+                CheckConstTensors(m_Weights, constants[0]);
+                if (m_Biases.has_value())
+                {
+                    CHECK(constants.size() == 2);
+                    CheckConstTensors(m_Biases.value(), constants[1]);
+                }
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 protected:
@@ -97,13 +134,27 @@
 
     virtual ~TestFullyConnectedLayerVistor() {}
 
-    void VisitFullyConnectedLayer(const IConnectableLayer* layer,
-                                  const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                  const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckDescriptor(fullyConnectedDescriptor);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::FullyConnected:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckDescriptor(static_cast<const armnn::FullyConnectedDescriptor&>(descriptor));
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 protected:
@@ -129,21 +180,31 @@
         , m_Gamma(gamma)
     {}
 
-    void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                      const BatchNormalizationDescriptor& descriptor,
-                                      const ConstTensor& mean,
-                                      const ConstTensor& variance,
-                                      const ConstTensor& beta,
-                                      const ConstTensor& gamma,
-                                      const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckDescriptor(descriptor);
-        CheckConstTensors(m_Mean, mean);
-        CheckConstTensors(m_Variance, variance);
-        CheckConstTensors(m_Beta, beta);
-        CheckConstTensors(m_Gamma, gamma);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::BatchNormalization:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckDescriptor(static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor));
+                CheckConstTensors(m_Mean,     constants[0]);
+                CheckConstTensors(m_Variance, constants[1]);
+                CheckConstTensors(m_Beta,     constants[2]);
+                CheckConstTensors(m_Gamma,    constants[3]);
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 protected:
@@ -166,81 +227,201 @@
         , m_Input(input)
     {}
 
-    void VisitConstantLayer(const IConnectableLayer* layer,
-                            const ConstTensor& input,
-                            const char* name = nullptr)
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckConstTensors(m_Input, input);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Constant:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckConstTensors(m_Input, constants[0]);
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 private:
     ConstTensor m_Input;
 };
 
-class TestLstmLayerVisitor : public TestLayerVisitor
+// Supplies common parameter-checking utilities to the concrete LSTM test visitors
+class LstmVisitor : public TestLayerVisitor
+{
+public:
+    explicit LstmVisitor(const LstmInputParams& params,
+                         const char* name = nullptr)
+         : TestLayerVisitor(name)
+         , m_InputParams(params) {}
+
+protected:
+    template<typename LayerType>
+    void CheckInputParameters(const LayerType* layer, const LstmInputParams& inputParams);
+
+    LstmInputParams m_InputParams;
+};
+
+template<typename LayerType>
+void LstmVisitor::CheckInputParameters(const LayerType* layer, const LstmInputParams& inputParams)
+{
+    CheckConstTensorPtrs("OutputGateBias",
+                         inputParams.m_OutputGateBias,
+                         layer->m_BasicParameters.m_OutputGateBias);
+    CheckConstTensorPtrs("InputToForgetWeights",
+                         inputParams.m_InputToForgetWeights,
+                         layer->m_BasicParameters.m_InputToForgetWeights);
+    CheckConstTensorPtrs("InputToCellWeights",
+                         inputParams.m_InputToCellWeights,
+                         layer->m_BasicParameters.m_InputToCellWeights);
+    CheckConstTensorPtrs("InputToOutputWeights",
+                         inputParams.m_InputToOutputWeights,
+                         layer->m_BasicParameters.m_InputToOutputWeights);
+    CheckConstTensorPtrs("RecurrentToForgetWeights",
+                         inputParams.m_RecurrentToForgetWeights,
+                         layer->m_BasicParameters.m_RecurrentToForgetWeights);
+    CheckConstTensorPtrs("RecurrentToCellWeights",
+                         inputParams.m_RecurrentToCellWeights,
+                         layer->m_BasicParameters.m_RecurrentToCellWeights);
+    CheckConstTensorPtrs("RecurrentToOutputWeights",
+                         inputParams.m_RecurrentToOutputWeights,
+                         layer->m_BasicParameters.m_RecurrentToOutputWeights);
+    CheckConstTensorPtrs("ForgetGateBias",
+                         inputParams.m_ForgetGateBias,
+                         layer->m_BasicParameters.m_ForgetGateBias);
+    CheckConstTensorPtrs("CellBias",
+                         inputParams.m_CellBias,
+                         layer->m_BasicParameters.m_CellBias);
+
+    CheckConstTensorPtrs("InputToInputWeights",
+                         inputParams.m_InputToInputWeights,
+                         layer->m_CifgParameters.m_InputToInputWeights);
+    CheckConstTensorPtrs("RecurrentToInputWeights",
+                         inputParams.m_RecurrentToInputWeights,
+                         layer->m_CifgParameters.m_RecurrentToInputWeights);
+    CheckConstTensorPtrs("InputGateBias",
+                         inputParams.m_InputGateBias,
+                         layer->m_CifgParameters.m_InputGateBias);
+
+    CheckConstTensorPtrs("ProjectionBias",
+                         inputParams.m_ProjectionBias,
+                         layer->m_ProjectionParameters.m_ProjectionBias);
+    CheckConstTensorPtrs("ProjectionWeights",
+                         inputParams.m_ProjectionWeights,
+                         layer->m_ProjectionParameters.m_ProjectionWeights);
+
+    CheckConstTensorPtrs("CellToInputWeights",
+                         inputParams.m_CellToInputWeights,
+                         layer->m_PeepholeParameters.m_CellToInputWeights);
+    CheckConstTensorPtrs("CellToForgetWeights",
+                         inputParams.m_CellToForgetWeights,
+                         layer->m_PeepholeParameters.m_CellToForgetWeights);
+    CheckConstTensorPtrs("CellToOutputWeights",
+                         inputParams.m_CellToOutputWeights,
+                         layer->m_PeepholeParameters.m_CellToOutputWeights);
+
+    CheckConstTensorPtrs("InputLayerNormWeights",
+                         inputParams.m_InputLayerNormWeights,
+                         layer->m_LayerNormParameters.m_InputLayerNormWeights);
+    CheckConstTensorPtrs("ForgetLayerNormWeights",
+                         inputParams.m_ForgetLayerNormWeights,
+                         layer->m_LayerNormParameters.m_ForgetLayerNormWeights);
+    CheckConstTensorPtrs("CellLayerNormWeights",
+                         inputParams.m_CellLayerNormWeights,
+                         layer->m_LayerNormParameters.m_CellLayerNormWeights);
+    CheckConstTensorPtrs("OutputLayerNormWeights",
+                         inputParams.m_OutputLayerNormWeights,
+                         layer->m_LayerNormParameters.m_OutputLayerNormWeights);
+}
+
+class TestLstmLayerVisitor : public LstmVisitor
 {
 public:
     explicit TestLstmLayerVisitor(const LstmDescriptor& descriptor,
                                   const LstmInputParams& params,
                                   const char* name = nullptr)
-        : TestLayerVisitor(name)
+        : LstmVisitor(params, name)
         , m_Descriptor(descriptor)
-        , m_InputParams(params)
     {}
 
-    void VisitLstmLayer(const IConnectableLayer* layer,
-                        const LstmDescriptor& descriptor,
-                        const LstmInputParams& params,
-                        const char* name = nullptr)
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckDescriptor(descriptor);
-        CheckInputParameters(params);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Lstm:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckDescriptor(static_cast<const armnn::LstmDescriptor&>(descriptor));
+                CheckInputParameters<const LstmLayer>(PolymorphicDowncast<const LstmLayer*>(layer), m_InputParams);
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 protected:
     void CheckDescriptor(const LstmDescriptor& descriptor);
-    void CheckInputParameters(const LstmInputParams& inputParams);
-    void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
 
 private:
     LstmDescriptor m_Descriptor;
-    LstmInputParams m_InputParams;
 };
 
-class TestQLstmLayerVisitor : public TestLayerVisitor
+class TestQLstmLayerVisitor : public LstmVisitor
 {
 public:
     explicit TestQLstmLayerVisitor(const QLstmDescriptor& descriptor,
                                    const LstmInputParams& params,
                                    const char* name = nullptr)
-            : TestLayerVisitor(name)
+            : LstmVisitor(params, name)
             , m_Descriptor(descriptor)
-            , m_InputParams(params)
     {}
 
-    void VisitQLstmLayer(const IConnectableLayer* layer,
-                         const QLstmDescriptor& descriptor,
-                         const LstmInputParams& params,
-                         const char* name = nullptr)
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckDescriptor(descriptor);
-        CheckInputParameters(params);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::QLstm:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckDescriptor(static_cast<const armnn::QLstmDescriptor&>(descriptor));
+                CheckInputParameters<const QLstmLayer>(PolymorphicDowncast<const QLstmLayer*>(layer), m_InputParams);
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 protected:
     void CheckDescriptor(const QLstmDescriptor& descriptor);
-    void CheckInputParameters(const LstmInputParams& inputParams);
-    void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
 
 private:
     QLstmDescriptor m_Descriptor;
-    LstmInputParams m_InputParams;
 };
 
 
@@ -253,18 +434,31 @@
         , m_InputParams(params)
     {}
 
-    void VisitQuantizedLstmLayer(const IConnectableLayer* layer,
-                                 const QuantizedLstmInputParams& params,
-                                 const char* name = nullptr)
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-        CheckInputParameters(params);
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::QuantizedLstm:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerName(name);
+                CheckInputParameters(m_InputParams);
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
     }
 
 protected:
-    void CheckInputParameters(const QuantizedLstmInputParams& inputParams);
-    void CheckConstTensorPtrs(const std::string& name, const ConstTensor* expected, const ConstTensor* actual);
+    void CheckInputParameters(const QuantizedLstmInputParams& params);
 
 private:
     QuantizedLstmInputParams m_InputParams;
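
Because ExecuteStrategy no longer hands the visitor the LstmInputParams that
were passed to AddLstmLayer, the expected parameters are now compared against
the tensor handles stored on the layer itself, reached via PolymorphicDowncast.
CheckInputParameters is a template because LstmLayer and QLstmLayer expose
structurally identical parameter groups without sharing a base class, and the
two CheckConstTensorPtrs overloads exist because the expected side is a raw
ConstTensor* while the layer side is a std::shared_ptr<ConstTensorHandle>.
The shared contract, restated as a free-standing sketch (illustrative only,
not code from the patch):

#include <doctest/doctest.h>

#include <string>

// Generic over both pointer kinds: raw ConstTensor* on the expected side,
// std::shared_ptr<ConstTensorHandle> on the layer side.
template <typename ExpectedPtr, typename ActualPtr>
void CheckBothSetOrBothNull(const std::string& name,
                            const ExpectedPtr& expected,
                            const ActualPtr& actual)
{
    if (expected == nullptr)
    {
        // A feature the test did not configure (e.g. CIFG, peephole,
        // projection or layer norm disabled) must also be absent on the layer.
        CHECK_MESSAGE(actual == nullptr, name + " should have been a nullptr");
    }
    else
    {
        CHECK_MESSAGE(actual != nullptr, name + " should have been set");
        // The concrete visitors then compare contents via CheckConstTensors.
    }
}
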
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 9acb60d..25dab59 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -398,26 +398,44 @@
 
 TEST_CASE("Network_AddQuantize")
 {
-    struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+    struct Test : public armnn::IStrategy
     {
-        void VisitQuantizeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            m_Visited = true;
+            armnn::IgnoreUnused(descriptor, constants, id);
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input: break;
+                case armnn::LayerType::Output: break;
+                case armnn::LayerType::Quantize:
+                {
+                    m_Visited = true;
 
-            CHECK(layer);
+                    CHECK(layer);
 
-            std::string expectedName = std::string("quantize");
-            CHECK(std::string(layer->GetName()) == expectedName);
-            CHECK(std::string(name) == expectedName);
+                    std::string expectedName = std::string("quantize");
+                    CHECK(std::string(layer->GetName()) == expectedName);
+                    CHECK(std::string(name) == expectedName);
 
-            CHECK(layer->GetNumInputSlots() == 1);
-            CHECK(layer->GetNumOutputSlots() == 1);
+                    CHECK(layer->GetNumInputSlots() == 1);
+                    CHECK(layer->GetNumOutputSlots() == 1);
 
-            const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-            CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
+                    const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+                    CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
 
-            const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
-            CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+                    const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+                    CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+                    break;
+                }
+                default:
+                {
+                    // Ignore all other layer types.
+                }
+            }
         }
 
         bool m_Visited = false;
@@ -440,7 +458,7 @@
     quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
 
     Test testQuantize;
-    graph->Accept(testQuantize);
+    graph->ExecuteStrategy(testQuantize);
 
     CHECK(testQuantize.m_Visited == true);
 
@@ -448,29 +466,47 @@
 
 TEST_CASE("Network_AddMerge")
 {
-    struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+    struct Test : public armnn::IStrategy
     {
-        void VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            m_Visited = true;
+            armnn::IgnoreUnused(descriptor, constants, id);
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input: break;
+                case armnn::LayerType::Output: break;
+                case armnn::LayerType::Merge:
+                {
+                    m_Visited = true;
 
-            CHECK(layer);
+                    CHECK(layer);
 
-            std::string expectedName = std::string("merge");
-            CHECK(std::string(layer->GetName()) == expectedName);
-            CHECK(std::string(name) == expectedName);
+                    std::string expectedName = std::string("merge");
+                    CHECK(std::string(layer->GetName()) == expectedName);
+                    CHECK(std::string(name) == expectedName);
 
-            CHECK(layer->GetNumInputSlots() == 2);
-            CHECK(layer->GetNumOutputSlots() == 1);
+                    CHECK(layer->GetNumInputSlots() == 2);
+                    CHECK(layer->GetNumOutputSlots() == 1);
 
-            const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-            CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
+                    const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+                    CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
 
-            const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
-            CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
+                    const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
+                    CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
 
-            const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
-            CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
+                    const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+                    CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
+                    break;
+                }
+                default:
+                {
+                    // Ignore all other layer types.
+                }
+            }
         }
 
         bool m_Visited = false;
@@ -493,7 +529,7 @@
     merge->GetOutputSlot(0).SetTensorInfo(info);
 
     Test testMerge;
-    network->Accept(testMerge);
+    network->ExecuteStrategy(testMerge);
 
     CHECK(testMerge.m_Visited == true);
 }
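
Both tests above rely on network-level traversal: ExecuteStrategy on a network
or graph is invoked once for every layer, including the Input and Output
layers, which is why the switch statements list explicit no-op cases for them.
A minimal usage sketch against the public API, reusing the hypothetical
CountingStrategy from the earlier sketch:

#include <armnn/INetwork.hpp>

void VisitWholeNetworkExample()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* input  = network->AddInputLayer(0, "input");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");
    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    CountingStrategy strategy;          // hypothetical, from the earlier sketch
    network->ExecuteStrategy(strategy); // called once per layer: input, output
}
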
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 66da3ad..8416a8d 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -13,13 +13,12 @@
 #include <armnn/BackendHelper.hpp>
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
 
 #include <armnn/utility/Assert.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
-#include <armnnUtils/FloatingPointConverter.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
 
-#include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/LayerSupportBase.hpp>
 #include <backendsCommon/TensorHandle.hpp>
 
@@ -201,10 +200,6 @@
         return nullptr;
     }
 
-    IBackendInternal::Optimizations GetOptimizations() const override
-    {
-        return {};
-    }
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
     {
         return std::make_shared<MockLayerSupport>();
@@ -265,10 +260,6 @@
         return nullptr;
     }
 
-    IBackendInternal::Optimizations GetOptimizations() const override
-    {
-        return {};
-    }
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override
     {
         return std::make_shared<MockLayerSupport>();
@@ -707,30 +698,42 @@
 
 TEST_CASE("BackendHintTest")
 {
-    class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
+    class TestBackendAssignment : public StrategyBase<NoThrowStrategy>
     {
     public:
-        void VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
-        {
-            IgnoreUnused(id, name);
-            auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
-            CHECK((inputLayer->GetBackendId() == "MockBackend"));
-        }
 
-        void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
+        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                             const armnn::BaseDescriptor& descriptor,
+                             const std::vector<armnn::ConstTensor>& constants,
+                             const char* name,
+                             const armnn::LayerBindingId id = 0) override
         {
-            IgnoreUnused(id, name);
-            auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
-            CHECK((outputLayer->GetBackendId() == "MockBackend"));
-        }
-
-        void VisitActivationLayer(const IConnectableLayer* layer,
-                                  const ActivationDescriptor& activationDescriptor,
-                                  const char* name = nullptr) override
-        {
-            IgnoreUnused(activationDescriptor, name);
-            auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
-            CHECK((activation->GetBackendId() == "CustomBackend"));
+            armnn::IgnoreUnused(descriptor, constants, id, name);
+            switch (layer->GetType())
+            {
+                case armnn::LayerType::Input:
+                {
+                    auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
+                    CHECK((inputLayer->GetBackendId() == "MockBackend"));
+                    break;
+                }
+                case armnn::LayerType::Output:
+                {
+                    auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
+                    CHECK((outputLayer->GetBackendId() == "MockBackend"));
+                    break;
+                }
+                case armnn::LayerType::Activation:
+                {
+                    auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
+                    CHECK((activation->GetBackendId() == "CustomBackend"));
+                    break;
+                }
+                default:
+                {
+                    m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+                }
+            }
         }
     };
 
@@ -802,7 +805,7 @@
     TestBackendAssignment visitor;
     for (auto it = firstLayer; it != lastLayer; ++it)
     {
-        (*it)->Accept(visitor);
+        (*it)->ExecuteStrategy(visitor);
     }
     // Clean up the registry for the next test.
     backendRegistry.Deregister("MockBackend");
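
StrategyBase<NoThrowStrategy> takes over the role that
LayerVisitorBase<VisitorNoThrowPolicy> used to play: a default ExecuteStrategy
whose behaviour for unhandled layer types is supplied by a policy object.
StrategyBase.hpp is added by this patch but not shown in this section, so the
following is only a sketch of its presumable shape, inferred from how
m_DefaultStrategy.Apply(...) is used in the tests (the *Sketch names are
placeholders, not the real class names):

#include <armnn/Exceptions.hpp>
#include <armnn/IStrategy.hpp>
#include "LayersFwd.hpp" // GetLayerTypeAsCString, as in the test headers above

#include <string>
#include <vector>

// Policy for visitors that silently skip layer types they do not check.
struct NoThrowStrategySketch
{
    void Apply(const std::string&) {} // do nothing for unhandled layer types
};

// Policy for callers that must be told about unhandled layer types.
struct ThrowStrategySketch
{
    void Apply(const std::string& layerType)
    {
        throw armnn::UnimplementedException("Unhandled layer type: " + layerType);
    }
};

template <typename DefaultStrategy>
class StrategyBaseSketch : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor&,
                         const std::vector<armnn::ConstTensor>&,
                         const char*,
                         const armnn::LayerBindingId = 0) override
    {
        // Derived visitors override ExecuteStrategy and fall back to the
        // policy, directly or via this base, for layer types they ignore.
        m_DefaultStrategy.Apply(armnn::GetLayerTypeAsCString(layer->GetType()));
    }

protected:
    DefaultStrategy m_DefaultStrategy;
};
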
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.cpp b/src/armnn/test/TestInputOutputLayerVisitor.cpp
index 8462290..3b18e07 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.cpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.cpp
@@ -19,7 +19,7 @@
     NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddInputLayer(1, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckInputLayerVisitorBindingIdAndNameNull")
@@ -28,7 +28,7 @@
     NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddInputLayer(1);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckOutputLayerVisitorBindingIdAndName")
@@ -38,7 +38,7 @@
     NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddOutputLayer(1, layerName);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 TEST_CASE("CheckOutputLayerVisitorBindingIdAndNameNull")
@@ -47,7 +47,7 @@
     NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddOutputLayer(1);
-    layer->Accept(visitor);
+    layer->ExecuteStrategy(visitor);
 }
 
 }
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.hpp b/src/armnn/test/TestInputOutputLayerVisitor.hpp
index b890895..e812f2f 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.hpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.hpp
@@ -27,14 +27,28 @@
         , visitorId(id)
     {};
 
-    void VisitInputLayer(const IConnectableLayer* layer,
-                         LayerBindingId id,
-                         const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerBindingId(visitorId, id);
-        CheckLayerName(name);
-    };
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Input:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerBindingId(visitorId, id);
+                CheckLayerName(name);
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
+    }
 };
 
 class TestOutputLayerVisitor : public TestLayerVisitor
@@ -48,14 +62,28 @@
         , visitorId(id)
     {};
 
-    void VisitOutputLayer(const IConnectableLayer* layer,
-                          LayerBindingId id,
-                          const char* name = nullptr) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        CheckLayerPointer(layer);
-        CheckLayerBindingId(visitorId, id);
-        CheckLayerName(name);
-    };
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
+        {
+            case armnn::LayerType::Output:
+            {
+                CheckLayerPointer(layer);
+                CheckLayerBindingId(visitorId, id);
+                CheckLayerName(name);
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
+        }
+    }
 };
 
 } //namespace armnn
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index ec40511..d5f705f 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -49,6 +49,62 @@
     }
 }
 
+void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const ConstTensorHandle& actual)
+{
+    auto& actualInfo = actual.GetTensorInfo();
+    CHECK(expected.GetInfo() == actualInfo);
+    CHECK(expected.GetNumDimensions() == actualInfo.GetNumDimensions());
+    CHECK(expected.GetNumElements() == actualInfo.GetNumElements());
+    CHECK(expected.GetNumBytes() == actualInfo.GetNumBytes());
+    if (expected.GetNumBytes() == actualInfo.GetNumBytes())
+    {
+        //check data is the same byte by byte
+        const unsigned char* expectedPtr = static_cast<const unsigned char*>(expected.GetMemoryArea());
+        const unsigned char* actualPtr = static_cast<const unsigned char*>(actual.Map(true));
+        for (unsigned int i = 0; i < expected.GetNumBytes(); i++)
+        {
+            CHECK(*(expectedPtr + i) == *(actualPtr + i));
+        }
+        actual.Unmap();
+    }
+}
+
+void TestLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+                                            const ConstTensor* expected,
+                                            const std::shared_ptr<ConstTensorHandle> actual)
+{
+    if (expected == nullptr)
+    {
+        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+    }
+    else
+    {
+        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+        if (actual != nullptr)
+        {
+            CheckConstTensors(*expected, *actual);
+        }
+    }
+}
+
+void TestLayerVisitor::CheckConstTensorPtrs(const std::string& name,
+                                            const ConstTensor* expected,
+                                            const ConstTensor* actual)
+{
+    if (expected == nullptr)
+    {
+        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+    }
+    else
+    {
+        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+        if (actual != nullptr)
+        {
+            CheckConstTensors(*expected, *actual);
+        }
+    }
+}
+
 void TestLayerVisitor::CheckOptionalConstTensors(const Optional<ConstTensor>& expected,
                                                  const Optional<ConstTensor>& actual)
 {
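
The new ConstTensorHandle overload of CheckConstTensors above maps the handle
(blocking) and compares the payload byte by byte so that a failure reports the
first differing byte. An equivalent one-shot comparison, shown here only to
illustrate the Map/Unmap pairing (TensorBytesEqual is illustrative, not part
of the patch):

#include <backendsCommon/TensorHandle.hpp>

#include <cstring>

bool TensorBytesEqual(const armnn::ConstTensor& expected,
                      const armnn::ConstTensorHandle& actual)
{
    if (expected.GetNumBytes() != actual.GetTensorInfo().GetNumBytes())
    {
        return false;
    }
    const void* actualPtr = actual.Map(true); // blocking map of the handle
    bool equal = std::memcmp(expected.GetMemoryArea(), actualPtr,
                             expected.GetNumBytes()) == 0;
    actual.Unmap(); // every Map must be paired with an Unmap
    return equal;
}
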
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index e43227f..eaf1667 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -4,13 +4,14 @@
 //
 #pragma once
 
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
 #include <armnn/Descriptors.hpp>
+#include <backendsCommon/TensorHandle.hpp>
 
 namespace armnn
 {
-// Abstract base class with do nothing implementations for all layer visit methods
-class TestLayerVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
+// Abstract base class with do-nothing implementations for all layer types
+class TestLayerVisitor : public StrategyBase<NoThrowStrategy>
 {
 protected:
     virtual ~TestLayerVisitor() {}
@@ -19,7 +20,17 @@
 
     void CheckLayerPointer(const IConnectableLayer* layer);
 
-    void CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual);
+    void CheckConstTensors(const ConstTensor& expected,
+                           const ConstTensor& actual);
+    void CheckConstTensors(const ConstTensor& expected,
+                           const ConstTensorHandle& actual);
+
+    void CheckConstTensorPtrs(const std::string& name,
+                              const ConstTensor* expected,
+                              const ConstTensor* actual);
+    void CheckConstTensorPtrs(const std::string& name,
+                              const ConstTensor* expected,
+                              const std::shared_ptr<ConstTensorHandle> actual);
 
     void CheckOptionalConstTensors(const Optional<ConstTensor>& expected, const Optional<ConstTensor>& actual);
 
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 39c00f4..cfdaaf5 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -20,7 +20,7 @@
     Test##name##LayerVisitor visitor(descriptor, layerName); \
     armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor, layerName); \
-    layer->Accept(visitor); \
+    layer->ExecuteStrategy(visitor); \
 }
 
 #define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name, testName) \
@@ -30,7 +30,7 @@
     Test##name##LayerVisitor visitor(descriptor); \
     armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor); \
-    layer->Accept(visitor); \
+    layer->ExecuteStrategy(visitor); \
 }
 
 template<typename Descriptor> Descriptor GetDescriptor();
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index a3c1420..b1f9512 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -29,15 +29,31 @@
         : armnn::TestLayerVisitor(layerName) \
         , m_Descriptor(descriptor) {}; \
     \
-    void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
-                            const Descriptor& descriptor, \
-                            const char* layerName = nullptr) override \
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer, \
+                         const armnn::BaseDescriptor& descriptor, \
+                         const std::vector<armnn::ConstTensor>& constants, \
+                         const char* layerName, \
+                         const armnn::LayerBindingId id = 0) override \
     { \
-        CheckLayerPointer(layer); \
-        CheckDescriptor(descriptor); \
-        CheckLayerName(layerName); \
+        armnn::IgnoreUnused(descriptor, constants, id); \
+        switch (layer->GetType()) \
+        { \
+            case armnn::LayerType::Input: break; \
+            case armnn::LayerType::Output: break; \
+            case armnn::LayerType::name: \
+            { \
+                CheckLayerPointer(layer); \
+                CheckDescriptor(static_cast<const Descriptor&>(descriptor)); \
+                CheckLayerName(layerName); \
+                break; \
+            } \
+            default: \
+            { \
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType())); \
+            } \
+        } \
     } \
-};
+}; \
 
 } // anonymous namespace
 
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 00d65f8..497c36b 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -18,7 +18,7 @@
     Test##name##LayerVisitor visitor("name##Layer"); \
     armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer("name##Layer"); \
-    layer->Accept(visitor); \
+    layer->ExecuteStrategy(visitor); \
 }
 
 #define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, testName) \
@@ -27,7 +27,7 @@
     Test##name##LayerVisitor visitor; \
     armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer(); \
-    layer->Accept(visitor); \
+    layer->ExecuteStrategy(visitor); \
 }
 
 } // anonymous namespace
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
index 519cbba..c0db857 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
@@ -15,12 +15,28 @@
 public: \
     explicit Test##name##LayerVisitor(const char* layerName = nullptr) : armnn::TestLayerVisitor(layerName) {}; \
     \
-    void Visit##name##Layer(const armnn::IConnectableLayer* layer, \
-                            const char* layerName = nullptr) override \
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer, \
+                         const armnn::BaseDescriptor& descriptor, \
+                         const std::vector<armnn::ConstTensor>& constants, \
+                         const char* layerName, \
+                         const armnn::LayerBindingId id = 0) override \
     { \
-        CheckLayerPointer(layer); \
-        CheckLayerName(layerName); \
+        armnn::IgnoreUnused(descriptor, constants, id); \
+        switch (layer->GetType()) \
+        { \
+            case armnn::LayerType::name: \
+            { \
+                CheckLayerPointer(layer); \
+                CheckLayerName(layerName); \
+                break; \
+            } \
+            default: \
+            { \
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType())); \
+            } \
+        } \
     } \
+    \
 };
 
 } // anonymous namespace
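
Each name-only visitor generated by the macro above expands to an ordinary
class. Written out by hand for the Floor layer it would read roughly as
follows (Floor is only an illustrative layer type; the generated class mirrors
the macro body exactly):

class TestFloorLayerVisitor : public armnn::TestLayerVisitor
{
public:
    explicit TestFloorLayerVisitor(const char* layerName = nullptr)
        : armnn::TestLayerVisitor(layerName) {}

    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* layerName,
                         const armnn::LayerBindingId id = 0) override
    {
        armnn::IgnoreUnused(descriptor, constants, id);
        switch (layer->GetType())
        {
            case armnn::LayerType::Floor:
            {
                CheckLayerPointer(layer);
                CheckLayerName(layerName);
                break;
            }
            default:
            {
                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
            }
        }
    }
};
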
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index eaeab78..6b73946 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -2412,6 +2412,9 @@
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+
+/// @note The ResizeBilinear operation was deprecated and removed in favor of the Resize operation.
+///       This function is kept for backwards compatibility.
 void IDeserializer::DeserializerImpl::ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
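
The note above reflects that the standalone ResizeBilinear layer collapsed
into the generic Resize layer. A sketch of the mapping a deserializer can
apply to a legacy record (the ResizeDescriptor fields are real armnn API; the
helper function itself is illustrative):

#include <armnn/Descriptors.hpp>

#include <cstdint>

armnn::ResizeDescriptor MakeBilinearResizeDescriptor(uint32_t targetWidth,
                                                     uint32_t targetHeight,
                                                     armnn::DataLayout layout)
{
    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method       = armnn::ResizeMethod::Bilinear; // fixed method
    descriptor.m_TargetWidth  = targetWidth;
    descriptor.m_TargetHeight = targetHeight;
    descriptor.m_DataLayout   = layout;
    return descriptor;
}
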
diff --git a/src/armnnDeserializer/test/DeserializeComparison.cpp b/src/armnnDeserializer/test/DeserializeComparison.cpp
index a941f12..3dda34c 100644
--- a/src/armnnDeserializer/test/DeserializeComparison.cpp
+++ b/src/armnnDeserializer/test/DeserializeComparison.cpp
@@ -241,15 +241,6 @@
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual,       Float32)
 
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal,          QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater,        QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less,           QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual,    QuantisedAsymm8)
-DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual,       QuantisedAsymm8)
-ARMNN_NO_DEPRECATE_WARN_END
-
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal,          QAsymmU8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater,        QAsymmU8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QAsymmU8)
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index efaf9f8..7e1b74e 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -155,15 +155,6 @@
     CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
 }
 
-void SerializerStrategy::SerializeAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
-    IgnoreUnused(name);
-    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
-    auto flatBufferAbsLayer  = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
-
-    CreateAnyLayer(flatBufferAbsLayer.o, serializer::Layer::Layer_AbsLayer);
-}
-
 // Build FlatBuffer for Activation Layer
 void SerializerStrategy::SerializeActivationLayer(const armnn::IConnectableLayer* layer,
                                                   const armnn::ActivationDescriptor& descriptor,
@@ -570,16 +561,6 @@
     CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
 }
 
-void SerializerStrategy::SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
-    IgnoreUnused(name);
-
-    auto fbBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
-    auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
-
-    CreateAnyLayer(fbEqualLayer.o, serializer::Layer::Layer_EqualLayer);
-}
-
 void SerializerStrategy::SerializeFillLayer(const armnn::IConnectableLayer* layer,
                                        const armnn::FillDescriptor& fillDescriptor,
                                        const char* name)
@@ -619,17 +600,6 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
 }
 
-
-void SerializerStrategy::SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
-    IgnoreUnused(name);
-
-    auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
-    auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
-
-    CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer);
-}
-
 void SerializerStrategy::SerializeInstanceNormalizationLayer(
     const armnn::IConnectableLayer* layer,
     const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
@@ -874,13 +844,6 @@
     CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
 }
 
-void SerializerStrategy::SerializeMergerLayer(const armnn::IConnectableLayer* layer,
-                                         const armnn::MergerDescriptor& mergerDescriptor,
-                                         const char* name)
-{
-    SerializeConcatLayer(layer, mergerDescriptor, name);
-}
-
 void SerializerStrategy::SerializeConcatLayer(const armnn::IConnectableLayer* layer,
                                          const armnn::ConcatDescriptor& concatDescriptor,
                                          const char* name)
@@ -1034,29 +997,6 @@
     CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
 }
 
-void SerializerStrategy::SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
-                                                 const armnn::ResizeBilinearDescriptor& resizeDescriptor,
-                                                 const char* name)
-{
-    IgnoreUnused(name);
-
-    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
-
-    auto flatBufferDescriptor =
-        CreateResizeBilinearDescriptor(m_flatBufferBuilder,
-                                       resizeDescriptor.m_TargetWidth,
-                                       resizeDescriptor.m_TargetHeight,
-                                       GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
-                                       resizeDescriptor.m_AlignCorners,
-                                       resizeDescriptor.m_HalfPixelCenters);
-
-    auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
-                                                                 flatBufferBaseLayer,
-                                                                 flatBufferDescriptor);
-
-    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
-}
-
 void SerializerStrategy::SerializeResizeLayer(const armnn::IConnectableLayer* layer,
                                          const armnn::ResizeDescriptor& resizeDescriptor,
                                          const char* name)
@@ -1081,16 +1021,6 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
 }
 
-void SerializerStrategy::SerializeRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
-{
-    IgnoreUnused(name);
-
-    auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
-    auto fbRsqrtLayer     = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
-
-    CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer);
-}
-
 void SerializerStrategy::SerializeSliceLayer(const armnn::IConnectableLayer* layer,
                                         const armnn::SliceDescriptor& sliceDescriptor,
                                         const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 1161095..2f827ac 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -104,10 +104,6 @@
     uint32_t m_layerId;
 
 private:
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    void SerializeAbsLayer(const armnn::IConnectableLayer* layer,
-                                  const char* name = nullptr);
-
     void SerializeActivationLayer(const armnn::IConnectableLayer* layer,
                                   const armnn::ActivationDescriptor& descriptor,
                                   const char* name = nullptr);
@@ -181,9 +177,6 @@
                                         const armnn::ElementwiseUnaryDescriptor& descriptor,
                                         const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
-    void SerializeEqualLayer(const armnn::IConnectableLayer* layer, const char* name);
-
     void SerializeFillLayer(const armnn::IConnectableLayer* layer,
                             const armnn::FillDescriptor& fillDescriptor,
                             const char* name = nullptr);
@@ -199,9 +192,6 @@
                               const armnn::GatherDescriptor& gatherDescriptor,
                               const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use VisitComparisonLayer instead")
-    void SerializeGreaterLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr);
-
     void SerializeInputLayer(const armnn::IConnectableLayer* layer,
                          armnn::LayerBindingId id,
                          const char* name = nullptr);
@@ -240,11 +230,6 @@
     void SerializeMergeLayer(const armnn::IConnectableLayer* layer,
                              const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use VisitConcatLayer instead")
-    void SerializeMergerLayer(const armnn::IConnectableLayer* layer,
-                              const armnn::MergerDescriptor& mergerDescriptor,
-                              const char* name = nullptr);
-
     void SerializeMultiplicationLayer(const armnn::IConnectableLayer* layer,
                                       const char* name = nullptr);
 
@@ -294,15 +279,6 @@
                               const armnn::ResizeDescriptor& resizeDescriptor,
                               const char* name = nullptr);
 
-    ARMNN_DEPRECATED_MSG("Use VisitResizeLayer instead")
-    void SerializeResizeBilinearLayer(const armnn::IConnectableLayer* layer,
-                                      const armnn::ResizeBilinearDescriptor& resizeDescriptor,
-                                      const char* name = nullptr);
-
-    ARMNN_DEPRECATED_MSG("Use VisitElementwiseUnaryLayer instead")
-    void SerializeRsqrtLayer(const armnn::IConnectableLayer* layer,
-                             const char* name = nullptr);
-
     void SerializeSliceLayer(const armnn::IConnectableLayer* layer,
                              const armnn::SliceDescriptor& sliceDescriptor,
                              const char* name = nullptr);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 2f8fd73..f2c9852 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -748,7 +748,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-TEST_CASE("SerializeDeserializeEqual")
+TEST_CASE("SerializeDeserializeComparisonEqual")
 {
     const std::string layerName("EqualLayer");
     const armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Float32);
@@ -758,9 +758,8 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer2 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const equalLayer = network->AddEqualLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::ComparisonDescriptor equalDescriptor(armnn::ComparisonOperation::Equal);
+    armnn::IConnectableLayer* const equalLayer = network->AddComparisonLayer(equalDescriptor, layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer1->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0));
@@ -1111,10 +1110,7 @@
 }
 
 
-// NOTE: Until the deprecated AddGreaterLayer disappears this test checks that calling
-//       AddGreaterLayer places a ComparisonLayer into the serialized format and that
-//       when this deserialises we have a ComparisonLayer
-TEST_CASE("SerializeGreaterDeprecated")
+TEST_CASE("SerializeComparisonGreater")
 {
     const std::string layerName("greater");
 
@@ -1126,9 +1122,8 @@
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const equalLayer = network->AddGreaterLayer(layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::ComparisonDescriptor greaterDescriptor(armnn::ComparisonOperation::Greater);
+    armnn::IConnectableLayer* const equalLayer = network->AddComparisonLayer(greaterDescriptor, layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer0->GetOutputSlot(0).Connect(equalLayer->GetInputSlot(0));
@@ -1444,44 +1439,6 @@
     }
 };
 
-// NOTE: Until the deprecated AddMergerLayer disappears this test checks that calling
-//       AddMergerLayer places a ConcatLayer into the serialized format and that
-//       when this deserialises we have a ConcatLayer
-TEST_CASE("SerializeMerger")
-{
-    const std::string layerName("merger");
-    const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
-    const armnn::TensorInfo outputInfo = armnn::TensorInfo({4, 3, 2, 2}, armnn::DataType::Float32);
-
-    const std::vector<armnn::TensorShape> shapes({inputInfo.GetShape(), inputInfo.GetShape()});
-
-    armnn::OriginsDescriptor descriptor =
-        armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 0);
-
-    armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer* const inputLayerOne = network->AddInputLayer(0);
-    armnn::IConnectableLayer* const inputLayerTwo = network->AddInputLayer(1);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const mergerLayer = network->AddMergerLayer(descriptor, layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
-    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
-
-    inputLayerOne->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
-    inputLayerTwo->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
-    mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    inputLayerOne->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    inputLayerTwo->GetOutputSlot(0).SetTensorInfo(inputInfo);
-    mergerLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
-    std::string mergerLayerNetwork = SerializeNetwork(*network);
-    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(mergerLayerNetwork);
-    CHECK(deserializedNetwork);
-
-    MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor);
-    deserializedNetwork->ExecuteStrategy(verifier);
-}
-
 TEST_CASE("EnsureMergerLayerBackwardCompatibility")
 {
     // The hex data below is a flat buffer containing a simple network with two inputs
@@ -1979,14 +1936,14 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-class ResizeBilinearLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::ResizeBilinearDescriptor>
+class ResizeBilinearLayerVerifier : public LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor>
 {
 public:
     ResizeBilinearLayerVerifier(const std::string& layerName,
                                 const std::vector<armnn::TensorInfo>& inputInfos,
                                 const std::vector<armnn::TensorInfo>& outputInfos,
-                                const armnn::ResizeBilinearDescriptor& descriptor)
-        : LayerVerifierBaseWithDescriptor<armnn::ResizeBilinearDescriptor>(
+                                const armnn::ResizeDescriptor& descriptor)
+        : LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor>(
             layerName, inputInfos, outputInfos, descriptor) {}
 
     void ExecuteStrategy(const armnn::IConnectableLayer* layer,
@@ -2022,16 +1979,14 @@
     }
 };
 
-// NOTE: Until the deprecated AddResizeBilinearLayer disappears this test checks that
-//       calling AddResizeBilinearLayer places a ResizeLayer into the serialized format
-//       and that when this deserialises we have a ResizeLayer
 TEST_CASE("SerializeResizeBilinear")
 {
     const std::string layerName("resizeBilinear");
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
 
-    armnn::ResizeBilinearDescriptor desc;
+    armnn::ResizeDescriptor desc;
+    desc.m_Method = armnn::ResizeMethod::Bilinear;
     desc.m_TargetWidth  = 4u;
     desc.m_TargetHeight = 2u;
     desc.m_AlignCorners = true;
@@ -2039,9 +1994,7 @@
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    armnn::IConnectableLayer* const resizeLayer = network->AddResizeBilinearLayer(desc, layerName.c_str());
-    ARMNN_NO_DEPRECATE_WARN_END
+    armnn::IConnectableLayer* const resizeLayer = network->AddResizeLayer(desc, layerName.c_str());
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(resizeLayer->GetInputSlot(0));
@@ -2060,7 +2013,7 @@
 TEST_CASE("EnsureResizeBilinearBackwardCompatibility")
 {
     // The hex data below is a flat buffer containing a simple network with an input,
-    // a ResizeBilinearLayer (now deprecated) and an output
+    // a ResizeBilinearLayer (now deprecated and removed) and an output
     //
     // This test verifies that we can still deserialize this old-style model by replacing
     // the ResizeBilinearLayer with an equivalent ResizeLayer
@@ -2105,7 +2058,7 @@
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
 
-    armnn::ResizeBilinearDescriptor descriptor;
+    armnn::ResizeDescriptor descriptor;
     descriptor.m_TargetWidth  = 4u;
     descriptor.m_TargetHeight = 2u;
 
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index b405e19..8426246 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -5,7 +5,7 @@
 
 #include "ParserFlatbuffersFixture.hpp"
 
-#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/StrategyBase.hpp>
 #include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
@@ -19,45 +19,55 @@
 {
 using namespace armnn;
 
-class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
+class StandInLayerVerifier : public StrategyBase<NoThrowStrategy>
 {
 public:
     StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
                          const std::vector<TensorInfo>& outputInfos)
-        : LayerVisitorBase<VisitorThrowingPolicy>()
-        , m_InputInfos(inputInfos)
+        : m_InputInfos(inputInfos)
         , m_OutputInfos(outputInfos) {}
 
-    void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
-
-    void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
-
-    void VisitStandInLayer(const IConnectableLayer* layer,
-                           const StandInDescriptor& descriptor,
-                           const char*) override
+    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+                         const armnn::BaseDescriptor& descriptor,
+                         const std::vector<armnn::ConstTensor>& constants,
+                         const char* name,
+                         const armnn::LayerBindingId id = 0) override
     {
-        unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
-        CHECK(descriptor.m_NumInputs    == numInputs);
-        CHECK(layer->GetNumInputSlots() == numInputs);
-
-        unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
-        CHECK(descriptor.m_NumOutputs    == numOutputs);
-        CHECK(layer->GetNumOutputSlots() == numOutputs);
-
-        const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
-        for (unsigned int i = 0u; i < numInputs; ++i)
+        armnn::IgnoreUnused(descriptor, constants, id);
+        switch (layer->GetType())
         {
-            const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
-            CHECK(connectedSlot != nullptr);
+            case armnn::LayerType::StandIn:
+            {
+                auto standInDescriptor = static_cast<const armnn::StandInDescriptor&>(descriptor);
+                unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
+                CHECK(standInDescriptor.m_NumInputs    == numInputs);
+                CHECK(layer->GetNumInputSlots() == numInputs);
 
-            const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
-            CHECK(inputInfo == m_InputInfos[i]);
-        }
+                unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
+                CHECK(standInDescriptor.m_NumOutputs    == numOutputs);
+                CHECK(layer->GetNumOutputSlots() == numOutputs);
 
-        for (unsigned int i = 0u; i < numOutputs; ++i)
-        {
-            const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
-            CHECK(outputInfo == m_OutputInfos[i]);
+                const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
+                for (unsigned int i = 0u; i < numInputs; ++i)
+                {
+                    const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
+                    CHECK(connectedSlot != nullptr);
+
+                    const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
+                    CHECK(inputInfo == m_InputInfos[i]);
+                }
+
+                for (unsigned int i = 0u; i < numOutputs; ++i)
+                {
+                    const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
+                    CHECK(outputInfo == m_OutputInfos[i]);
+                }
+                break;
+            }
+            default:
+            {
+                m_DefaultStrategy.Apply(GetLayerTypeAsCString(layer->GetType()));
+            }
         }
     }
 
@@ -164,7 +174,7 @@
     void RunTest()
     {
         INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
-        network->Accept(m_StandInLayerVerifier);
+        network->ExecuteStrategy(m_StandInLayerVerifier);
     }
 
 private:
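
Note: ExecuteStrategy hands every descriptor over as a BaseDescriptor
reference, so the concrete type is recovered by downcasting once the layer
type is known, as the StandIn case above does. A sketch of that step, valid
only after checking layer->GetType() == armnn::LayerType::StandIn:

    // Downcast is safe here because the layer type has already been checked.
    const auto& standInDescriptor = static_cast<const armnn::StandInDescriptor&>(descriptor);
    unsigned int numInputs = standInDescriptor.m_NumInputs;
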
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 98b5ada..0630931 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -37,10 +37,6 @@
         {
             return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8;
         }
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        case armnn::DataType::QuantizedSymm8PerAxis:
-            return arm_compute::DataType::QSYMM8_PER_CHANNEL;
-        ARMNN_NO_DEPRECATE_WARN_END
         case armnn::DataType::Signed32:
             return arm_compute::DataType::S32;
         default:
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt
index 129cdbe..c894f98 100644
--- a/src/backends/backendsCommon/CMakeLists.txt
+++ b/src/backends/backendsCommon/CMakeLists.txt
@@ -12,7 +12,6 @@
     DynamicBackendUtils.hpp
     IBackendContext.hpp
     IBackendInternal.cpp
-    IBackendInternal.hpp
     IMemoryManager.hpp
     ITensorHandle.hpp
     ITensorHandleFactory.cpp
diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp
index 31706eb..ec1313d 100644
--- a/src/backends/backendsCommon/IBackendInternal.cpp
+++ b/src/backends/backendsCommon/IBackendInternal.cpp
@@ -9,26 +9,6 @@
 namespace armnn
 {
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-IBackendInternal::ISubGraphConverterPtr IBackendInternal::CreateSubGraphConverter(
-    const std::shared_ptr<SubGraph>& /*subGrapg*/) const
-{
-    return ISubGraphConverterPtr{};
-}
-
-IBackendInternal::Optimizations IBackendInternal::GetOptimizations() const
-{
-    return Optimizations{};
-}
-
-IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& /*subGraph*/,
-                                                                       bool& optimizationAttempted) const
-{
-    optimizationAttempted = false;
-    return nullptr;
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 IMemoryManagerUniquePtr IBackendInternal::CreateMemoryManager() const
 {
     return IMemoryManagerUniquePtr();
@@ -120,29 +100,12 @@
     return GetLayerSupport();
 }
 
-// Default implementation of OptimizeSubgraphView for backward compatibility with the old API.
+// Default implementation of OptimizeSubgraphView. Returns an untouched subgraph.
 // Override this method with a custom optimization implementation.
 OptimizationViews IBackendInternal::OptimizeSubgraphView(const SubgraphView& subgraph) const
 {
-    bool optimizationAttempted = false;
-
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    SubGraphUniquePtr optSubgraph = OptimizeSubGraph(subgraph, optimizationAttempted);
-    ARMNN_NO_DEPRECATE_WARN_END
-
     OptimizationViews result;
-    if (!optimizationAttempted)
-    {
-        result.AddUntouchedSubgraph(SubgraphView(subgraph));
-    }
-    else if (optSubgraph)
-    {
-        result.AddSubstitution({subgraph, SubgraphView(*optSubgraph.get())});
-    }
-    else
-    {
-        result.AddFailedSubgraph(SubgraphView(subgraph));
-    }
+    result.AddUntouchedSubgraph(SubgraphView(subgraph));
 
     return result;
 }
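
Note: with the old OptimizeSubGraph fallback gone, backends express graph
optimizations solely through OptimizeSubgraphView. A sketch of a custom
override for a hypothetical backend; the OptimizationViews calls are the
ones used by the code removed above, while TryFuseLayers is an invented
placeholder for backend-specific logic:

    armnn::OptimizationViews MyBackend::OptimizeSubgraphView(const armnn::SubgraphView& subgraph) const
    {
        armnn::OptimizationViews views;

        // Attempt a backend-specific rewrite (hypothetical helper).
        std::unique_ptr<armnn::SubgraphView> optimised = TryFuseLayers(subgraph);
        if (optimised)
        {
            // Replace the original subgraph with the optimised one.
            views.AddSubstitution({subgraph, armnn::SubgraphView(*optimised)});
        }
        else
        {
            // Nothing applied: report the subgraph back untouched.
            views.AddUntouchedSubgraph(armnn::SubgraphView(subgraph));
        }
        return views;
    }
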
diff --git a/src/backends/backendsCommon/IBackendInternal.hpp b/src/backends/backendsCommon/IBackendInternal.hpp
deleted file mode 100644
index 61ccc4f..0000000
--- a/src/backends/backendsCommon/IBackendInternal.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-//
-// Copyright © 2019 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-// This file is depricated and will be removed soon.
-// Please use the new header in armnn/backends instead.
-// This will use the new armnn/backends header.
-#include <armnn/backends/IBackendInternal.hpp>
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 2c3f827..ca1acc3 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -37,13 +37,6 @@
 namespace armnn
 {
 
-bool LayerSupportBase::IsAbsSupported(const TensorInfo&, // input
-                                      const TensorInfo&, // output
-                                      Optional<std::string &> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
 bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input
                                              const TensorInfo&, //output
                                              const ActivationDescriptor&, // descriptor
@@ -238,31 +231,11 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input,
-                                                   const TensorInfo& output,
-                                                   const ElementwiseUnaryDescriptor& descriptor,
+bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo&, // input
+                                                   const TensorInfo&, // output
+                                                   const ElementwiseUnaryDescriptor&, // descriptor
                                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    if (descriptor.m_Operation == UnaryOperation::Abs)
-    {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        return IsAbsSupported(input, output, reasonIfUnsupported);
-        ARMNN_NO_DEPRECATE_WARN_END
-    }
-    else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
-    {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        return IsRsqrtSupported(input, output, reasonIfUnsupported);
-        ARMNN_NO_DEPRECATE_WARN_END
-    }
-    return false;
-}
-
-bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo&, // input0
-                                        const armnn::TensorInfo&, // input1
-                                        const armnn::TensorInfo&, // output
-                                        armnn::Optional<std::string &> reasonIfUnsupported) const
-{
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
@@ -301,28 +274,12 @@
 bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
                                          const armnn::TensorInfo&, // input1
                                          const armnn::TensorInfo&, // output
-                                         armnn::Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
-                                         const armnn::TensorInfo&, // input1
-                                         const armnn::TensorInfo&, // output
                                          const GatherDescriptor&, // descriptor
                                          armnn::Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsGreaterSupported(const TensorInfo&, // input0
-                                          const TensorInfo&, // input1
-                                          const TensorInfo&, // output
-                                          Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
 bool LayerSupportBase::IsInputSupported(const TensorInfo&, // input
                                         Optional<std::string&> reasonIfUnsupported) const
 {
@@ -422,14 +379,6 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                                         const TensorInfo& output,
-                                         const OriginsDescriptor& descriptor,
-                                         Optional<std::string&> reasonIfUnsupported) const
-{
-    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
 bool LayerSupportBase::IsMinimumSupported(const TensorInfo&, // input0
                                           const TensorInfo&, // input1
                                           const TensorInfo&, // output
@@ -553,13 +502,6 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo&, // input
-                                                 const TensorInfo&, // output
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
 bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input
                                          const TensorInfo&, // output
                                          const ResizeDescriptor&, // descriptor
@@ -568,13 +510,6 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsRsqrtSupported(const TensorInfo&, // input
-                                        const TensorInfo&, // output
-                                        Optional<std::string &> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
 bool LayerSupportBase::IsShapeSupported(const TensorInfo&, // input
                                         const TensorInfo&, // output
                                         Optional<std::string&> reasonIfUnsupported) const
@@ -615,13 +550,6 @@
 }
 
 bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
-                                           const ViewsDescriptor&, // descriptor
-                                           Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
                                            const std::vector<std::reference_wrapper<TensorInfo>>&, // outputs
                                            const ViewsDescriptor&, // descriptor
                                            Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 240b1da..fc2906f 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -13,11 +13,6 @@
 class LayerSupportBase : public ILayerSupport
 {
 public:
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsAbsSupported(const TensorInfo& input,
-                        const TensorInfo& output,
-                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -149,12 +144,6 @@
                                      const ElementwiseUnaryDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsEqualSupported(const TensorInfo& input0,
-                          const TensorInfo& input1,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsFakeQuantizationSupported(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -175,24 +164,12 @@
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
-    bool IsGatherSupported(const TensorInfo& input0,
-                           const TensorInfo& input1,
-                           const TensorInfo& output,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsGatherSupported(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            const GatherDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsGreaterSupported(const TensorInfo& input0,
-                            const TensorInfo& input1,
-                            const TensorInfo& output,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -257,12 +234,6 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                           const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsMinimumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
@@ -346,16 +317,6 @@
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-    bool IsResizeBilinearSupported(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsRsqrtSupported(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsShapeSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -380,11 +341,6 @@
                                  const SpaceToDepthDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-    bool IsSplitterSupported(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsSplitterSupported(const TensorInfo& input,
                              const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                              const ViewsDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/TensorHandle.hpp b/src/backends/backendsCommon/TensorHandle.hpp
index 4e9d87d..b898bd1 100644
--- a/src/backends/backendsCommon/TensorHandle.hpp
+++ b/src/backends/backendsCommon/TensorHandle.hpp
@@ -242,16 +242,17 @@
     std::shared_ptr<ConstTensorHandle> m_TensorHandle;
 };
 
-using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstCpuTensorHandle is deprecated, "
-                                                "use ConstTensorHandle instead") = ConstTensorHandle;
-using CpuTensorHandle ARMNN_DEPRECATED_MSG("CpuTensorHandle is deprecated, "
-                                           "use TensorHandle instead") = TensorHandle;
-using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG("ScopedCpuTensorHandle is deprecated, "
-                                                 "use ScopedTensorHandle instead") = ScopedTensorHandle;
-using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("PassthroughCpuTensorHandle is deprecated, use "
-                                                      "PassthroughTensorHandle instead") = PassthroughTensorHandle;
-using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG("ConstPassthroughCpuTensorHandle is "
+using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstCpuTensorHandle is deprecated, "
+                                                "use ConstTensorHandle instead", "22.05") = ConstTensorHandle;
+using CpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("CpuTensorHandle is deprecated, "
+                                           "use TensorHandle instead", "22.05") = TensorHandle;
+using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ScopedCpuTensorHandle is deprecated, "
+                                                 "use ScopedTensorHandle instead", "22.05") = ScopedTensorHandle;
+using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("PassthroughCpuTensorHandle is deprecated, use "
+                                                      "PassthroughTensorHandle instead",
+                                                      "22.05") = PassthroughTensorHandle;
+using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstPassthroughCpuTensorHandle is "
                                                            "deprecated, use ConstPassthroughTensorHandle "
-                                                           "instead") = ConstPassthroughTensorHandle;
+                                                           "instead", "22.05") = ConstPassthroughTensorHandle;
 
 } // namespace armnn
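
Note: migrating off the renamed tensor handle aliases is a pure rename, since
each alias and its replacement are the same type. Assuming a valid
armnn::TensorInfo named info:

    // Still compiles until 22.05, but now emits a deprecation warning:
    //     armnn::ScopedCpuTensorHandle weights(info);
    // Preferred spelling:
    armnn::ScopedTensorHandle weights(info);
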
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index fe22133..27b59ea 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -157,15 +157,12 @@
 
 void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    if (tensor.GetDataType() != DataType::QSymmS8 &&
-        tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
+    if (tensor.GetDataType() != DataType::QSymmS8)
     {
         throw InvalidArgumentException(descName +
             ": Expected data type which supports per-axis quantization scheme but got " +
             GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
     }
-    ARMNN_NO_DEPRECATE_WARN_END
 }
 
 //---------------------------------------------------------------
@@ -362,15 +359,12 @@
     const DataType inputType = inputInfo.GetDataType();
     if (IsQuantized8BitType(inputType))
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
         const std::vector<DataType> validTypes =
         {
             DataType::QAsymmS8,
             DataType::QAsymmU8,
-            DataType::QSymmS8,
-            DataType::QuantizedSymm8PerAxis // deprecated
+            DataType::QSymmS8
         };
-        ARMNN_NO_DEPRECATE_WARN_END
 
         ValidateDataTypes(weightInfo, validTypes, descName);
     }
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 896081e..29d39d1 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -345,10 +345,15 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
-struct ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor>
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+struct
+ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ResizeBilinearQueueDescriptor is deprecated, use ResizeQueueDescriptor instead",
+                                  "22.08")
+ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor>
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
+ARMNN_NO_DEPRECATE_WARN_END
 
 struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor>
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 666f83d..3b7f3a0 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1499,13 +1499,6 @@
                                          modelOptions);
 }
 
-// Default Implementations
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
-                                                       const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
 {
@@ -1644,12 +1637,6 @@
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
-                                                         const WorkloadInfo& /*Info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
                                                                     const WorkloadInfo& /*info*/) const
 {
@@ -1680,12 +1667,6 @@
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
-                                                           const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
     const InstanceNormalizationQueueDescriptor& /*descriptor*/,
     const WorkloadInfo& /*info*/) const
@@ -1753,12 +1734,6 @@
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
-                                                          const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
 {
@@ -1848,24 +1823,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
-                                                                  const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
-                                                         const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateShape(const ShapeQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index c16fcb8..df4bcd6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -68,10 +68,6 @@
                                                               DataLayout dataLayout,
                                                               const bool IsMemoryManaged = true) const = 0;
 
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    virtual std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const;
-
     virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                         const WorkloadInfo&              info) const;
 
@@ -141,10 +137,6 @@
     virtual std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                               const WorkloadInfo& Info) const;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    virtual std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& Info) const;
-
     virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const;
 
@@ -160,10 +152,6 @@
     virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                                     const WorkloadInfo& info) const;
-
     virtual std::unique_ptr<IWorkload> CreateInstanceNormalization(
         const InstanceNormalizationQueueDescriptor& descriptor,
         const WorkloadInfo& info) const;
@@ -198,10 +186,6 @@
     virtual std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
-    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
-    virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
-                                                    const WorkloadInfo&          info) const;
-
     virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;
 
@@ -250,14 +234,6 @@
     virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
-    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
-    virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const;
-
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const;
-
     virtual std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index 2952023..ef507a6 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -34,10 +34,6 @@
                                                       const bool /*IsMemoryManaged*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
-                                         const WorkloadInfo& /*info*/) const override
-    { return nullptr; }
-
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
                                                 const WorkloadInfo& /*info*/) const override
     { return nullptr; }
@@ -111,19 +107,17 @@
     {
         if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
         {
-            AbsQueueDescriptor absDescriptor;
-            return CreateAbs(absDescriptor, info);
+            return nullptr;
         }
         else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
         {
-            RsqrtQueueDescriptor rsqrtDescriptor;
-            return CreateRsqrt(rsqrtDescriptor, info);
+            return nullptr;
         }
         else if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
         {
             return CreateLogicalUnary(descriptor, info);
         }
         return nullptr;
     }
 
     std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
@@ -234,10 +228,6 @@
                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
-                                           const WorkloadInfo& /*info*/) const override
-    { return nullptr; }
-
     std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index bd7f09b..fe68193 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -265,13 +265,9 @@
             case DataType::QAsymmU8:
                 weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
                 break;
-            ARMNN_NO_DEPRECATE_WARN_BEGIN
-            case DataType::QuantizedSymm8PerAxis:
-                ARMNN_FALLTHROUGH;
             case DataType::QSymmS8:
                 weightPermuted = ReorderWeightChannelsForAcl<int8_t>(weightPermuted, dataLayout, permuteBuffer);
                 break;
-            ARMNN_NO_DEPRECATE_WARN_END
             default:
                 break;
         }
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index e706fc8..df1a5c1 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -117,11 +117,6 @@
     return IMemoryManagerUniquePtr{};
 }
 
-IBackendInternal::Optimizations MockBackend::GetOptimizations() const
-{
-    return Optimizations{};
-}
-
 IBackendInternal::ILayerSupportSharedPtr MockBackend::GetLayerSupport() const
 {
     static ILayerSupportSharedPtr layerSupport{new MockLayerSupport};
diff --git a/src/backends/backendsCommon/test/MockBackend.hpp b/src/backends/backendsCommon/test/MockBackend.hpp
index d90ad79..c062452 100644
--- a/src/backends/backendsCommon/test/MockBackend.hpp
+++ b/src/backends/backendsCommon/test/MockBackend.hpp
@@ -162,7 +162,6 @@
         CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
                                       IBackendProfilingPtr& backendProfiling) override;
 
-    IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
 
     OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp
index ebe9434..ea6ece7 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.cpp
@@ -79,11 +79,6 @@
     return std::make_unique<RefMemoryManager>();
 }
 
-IBackendInternal::Optimizations MockImportBackend::GetOptimizations() const
-{
-    return Optimizations{};
-}
-
 IBackendInternal::ILayerSupportSharedPtr MockImportBackend::GetLayerSupport() const
 {
     static ILayerSupportSharedPtr layerSupport{new MockImportLayerSupport};
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp
index ecc661f..c07a97c 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportBackend.hpp
@@ -40,7 +40,6 @@
     IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
         const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling) override;
 
-    IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
 
     OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index b85232e..dd58e00 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -192,11 +192,6 @@
     return IBackendProfilingContextPtr{};
 }
 
-IBackendInternal::Optimizations ClBackend::GetOptimizations() const
-{
-    return Optimizations{};
-}
-
 IBackendInternal::IBackendSpecificModelContextPtr ClBackend::CreateBackendSpecificModelContext(
     const ModelOptions& modelOptions) const
 {
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index 0a069b9..80e4b97 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -78,7 +78,6 @@
     IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
         const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override;
 
-    IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 0873021..9a50f4a 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -176,14 +176,6 @@
 {
 }
 
-bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
-                                    const TensorInfo& output,
-                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
-    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
 bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ActivationDescriptor& descriptor,
@@ -563,15 +555,6 @@
                                    descriptor);
 }
 
-bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
-                                        const TensorInfo& input1,
-                                        const TensorInfo& output,
-                                        Optional<std::string&> reasonIfUnsupported) const
-{
-    ComparisonDescriptor descriptor(ComparisonOperation::Greater);
-    return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
-}
-
 bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
@@ -690,14 +673,6 @@
                                    descriptor);
 }
 
-bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                                       const TensorInfo& output,
-                                       const MergerDescriptor& descriptor,
-                                       Optional<std::string&> reasonIfUnsupported) const
-{
-    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
 bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
@@ -864,29 +839,6 @@
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
-bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
-                                               const TensorInfo& output,
-                                               Optional<std::string&> reasonIfUnsupported) const
-{
-    ResizeDescriptor descriptor;
-    descriptor.m_Method     = ResizeMethod::Bilinear;
-    descriptor.m_DataLayout = DataLayout::NCHW;
-
-    const TensorShape& outputShape = output.GetShape();
-    descriptor.m_TargetHeight = outputShape[2];
-    descriptor.m_TargetWidth  = outputShape[3];
-
-    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported) const
-{
-    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
-    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
 bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const SliceDescriptor& descriptor,
@@ -928,17 +880,6 @@
 }
 
 bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
-                                         const ViewsDescriptor& descriptor,
-                                         Optional<std::string&> reasonIfUnsupported) const
-{
-    IgnoreUnused(descriptor);
-    return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                    input.GetDataType(),
-                                    &TrueFunc<>,
-                                    &TrueFunc<>);
-}
-
-bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                          const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                          const ViewsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 43ae428..e7a6748 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -18,11 +18,6 @@
     ClLayerSupport();
     ~ClLayerSupport() {}
 
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsAbsSupported(const TensorInfo& input,
-                        const TensorInfo& output,
-                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -145,12 +140,6 @@
                            const GatherDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsGreaterSupported(const TensorInfo& input0,
-                            const TensorInfo& input1,
-                            const TensorInfo& output,
-                            Optional<std::string&> reasonIfUnsupported) const override;
-
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -196,12 +185,6 @@
                          const MeanDescriptor& descriptor,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                           const TensorInfo& output,
-                           const MergerDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsMinimumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
@@ -277,16 +260,6 @@
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-    bool IsResizeBilinearSupported(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsRsqrtSupported(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsSliceSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           const SliceDescriptor& descriptor,
@@ -307,11 +280,6 @@
                                  const SpaceToDepthDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-    bool IsSplitterSupported(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsSplitterSupported(const TensorInfo& input,
                              const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                              const ViewsDescriptor& descriptor,
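
The removed single-operator queries all forwarded to descriptor-based checks, so callers migrate by passing the descriptor themselves. A sketch of the replacement calls, assuming the input/output TensorInfo objects and a concat descriptor are already in scope:

    ClLayerSupport support;
    std::string reason;

    // IsAbsSupported / IsRsqrtSupported -> IsElementwiseUnarySupported
    support.IsElementwiseUnarySupported(input, output,
                                        ElementwiseUnaryDescriptor(UnaryOperation::Abs),
                                        Optional<std::string&>(reason));

    // IsGreaterSupported -> IsComparisonSupported
    support.IsComparisonSupported(input0, input1, output,
                                  ComparisonDescriptor(ComparisonOperation::Greater),
                                  Optional<std::string&>(reason));

    // IsMergerSupported -> IsConcatSupported (MergerDescriptor is an alias of OriginsDescriptor)
    support.IsConcatSupported(inputs, output, concatDescriptor,
                              Optional<std::string&>(reason));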
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 530cb69..3400799 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -194,17 +194,6 @@
         PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
 }
 
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
-                                                        const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-
-    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
-    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
-
-    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
 {
@@ -376,17 +365,6 @@
     }
 }
 
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-
-    ComparisonQueueDescriptor comparisonDescriptor;
-    comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
-
-    return CreateComparison(comparisonDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
 {
@@ -414,17 +392,6 @@
     return MakeWorkload<ClGatherWorkload>(descriptor, info, m_CLCompileContext);
 }
 
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-
-    ComparisonQueueDescriptor comparisonDescriptor;
-    comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
-
-    return CreateComparison(comparisonDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
@@ -507,12 +474,6 @@
     return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
-{
-    return CreateConcat(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
@@ -609,32 +570,6 @@
     return MakeWorkload<ClResizeWorkload>(descriptor, info, m_CLCompileContext);
 }
 
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                                   const WorkloadInfo& info) const
-{
-    ResizeQueueDescriptor resizeDescriptor;
-    resizeDescriptor.m_Inputs  = descriptor.m_Inputs;
-    resizeDescriptor.m_Outputs = descriptor.m_Outputs;
-
-    resizeDescriptor.m_Parameters.m_Method       = ResizeMethod::Bilinear;
-    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
-    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
-
-    return CreateResize(resizeDescriptor, info);
-}
-
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-
-    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
-    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
-
-    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 7f01ee0..3ca33c8 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -55,10 +55,6 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
-                                         const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
@@ -119,10 +115,6 @@
     std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
@@ -135,10 +127,6 @@
     std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
@@ -169,10 +157,6 @@
     std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
-    std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
@@ -221,14 +205,6 @@
     std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
-    std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
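The deleted Create* shims built the replacement queue descriptors internally; after this patch the caller does it. A sketch, assuming a workload factory and a populated WorkloadInfo are in scope:

    // m_Inputs/m_Outputs are wired exactly as for any other queue descriptor.
    ElementwiseUnaryQueueDescriptor absDescriptor;
    absDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
    auto absWorkload = factory.CreateElementwiseUnary(absDescriptor, info);

    ComparisonQueueDescriptor greaterDescriptor;
    greaterDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
    auto greaterWorkload = factory.CreateComparison(greaterDescriptor, info);
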
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 41b97c1..7d378fc 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -129,9 +129,6 @@
         case DataType::QAsymmU8:
             CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
             break;
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        case DataType::QuantizedSymm8PerAxis:
-            ARMNN_FALLTHROUGH;
         case DataType::QAsymmS8:
         case DataType::QSymmS8:
             CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int8_t>());
@@ -139,7 +136,6 @@
         case DataType::QSymmS16:
             CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int16_t>());
             break;
-        ARMNN_NO_DEPRECATE_WARN_END
         case DataType::Signed32:
             CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
             break;
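
Dropping DataType::QuantizedSymm8PerAxis from the tensor-copy switch works because per-axis weights are now plain QSymmS8 tensors whose per-channel parameters travel on the TensorInfo. A sketch with illustrative shape and scale values:

    // One scale per output channel, attached via the per-axis TensorInfo constructor.
    std::vector<float> scales = { 0.1f, 0.2f, 0.3f, 0.4f };
    TensorInfo weightsInfo(TensorShape({ 4, 1, 3, 3 }),
                           DataType::QSymmS8,
                           scales,
                           /*quantizationDim=*/0);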
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 1787675..2c3abfd 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -103,11 +103,6 @@
     return IBackendProfilingContextPtr{};
 }
 
-IBackendInternal::Optimizations NeonBackend::GetOptimizations() const
-{
-    return Optimizations{};
-}
-
 IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext(
     const ModelOptions& modelOptions) const
 {
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index 20da73a..d28ac3b 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -48,7 +48,6 @@
     IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override;
     IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
         const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override;
-    IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index ec64f90..d742229 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -145,14 +145,6 @@
 {
 }
 
-bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported) const
-{
-    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
-    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
 bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ActivationDescriptor& descriptor,
@@ -537,15 +529,6 @@
                                    descriptor);
 }
 
-bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
-                                          const armnn::TensorInfo& input1,
-                                          const armnn::TensorInfo& output,
-                                          armnn::Optional<std::string&> reasonIfUnsupported) const
-{
-    ComparisonDescriptor descriptor(ComparisonOperation::Greater);
-    return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
-}
-
 bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
@@ -653,14 +636,6 @@
                                    descriptor);
 }
 
-bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                                         const TensorInfo& output,
-                                         const MergerDescriptor& descriptor,
-                                         Optional<std::string&> reasonIfUnsupported) const
-{
-     return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
 bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
@@ -852,29 +827,6 @@
                                    descriptor);
 }
 
-bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    ResizeDescriptor descriptor;
-    descriptor.m_Method     = ResizeMethod::Bilinear;
-    descriptor.m_DataLayout = DataLayout::NCHW;
-
-    const TensorShape& outputShape = output.GetShape();
-    descriptor.m_TargetHeight = outputShape[2];
-    descriptor.m_TargetWidth  = outputShape[3];
-
-    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& input,
-                                        const TensorInfo& output,
-                                        Optional<std::string&> reasonIfUnsupported) const
-{
-    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
-    return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
-}
-
 bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const SliceDescriptor& descriptor,
@@ -920,17 +872,6 @@
 }
 
 bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
-                                           const ViewsDescriptor& descriptor,
-                                           Optional<std::string&> reasonIfUnsupported) const
-{
-    armnn::IgnoreUnused(descriptor);
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                            const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                            const ViewsDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index fc1e1f6..155d96a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -19,11 +19,6 @@
 
     ~NeonLayerSupport() {}
 
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsAbsSupported(const TensorInfo& input,
-                        const TensorInfo& output,
-                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -150,12 +145,6 @@
                            const GatherDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsGreaterSupported(const TensorInfo& input0,
-                            const TensorInfo& input1,
-                            const TensorInfo& output,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -201,12 +190,6 @@
                          const MeanDescriptor& descriptor,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                           const TensorInfo& output,
-                           const MergerDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsMinimumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
@@ -287,16 +270,6 @@
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
-    bool IsResizeBilinearSupported(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsRsqrtSupported(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsSliceSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           const SliceDescriptor& descriptor,
@@ -317,11 +290,6 @@
                                  const SpaceToDepthDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-    bool IsSplitterSupported(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsSplitterSupported(const TensorInfo& input,
                              const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                              const ViewsDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 9ec7583..605b03d 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -131,17 +131,6 @@
     return tensorHandle;
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-
-    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
-    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
-
-    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                  const WorkloadInfo&              info) const
 {
@@ -323,17 +312,6 @@
     }
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-
-    ComparisonQueueDescriptor comparisonDescriptor;
-    comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
-
-    return CreateComparison(comparisonDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
@@ -358,17 +336,6 @@
     return std::make_unique<NeonGatherWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                                              const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-
-    ComparisonQueueDescriptor comparisonDescriptor;
-    comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
-
-    return CreateComparison(comparisonDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                             const WorkloadInfo&        info) const
 {
@@ -449,12 +416,6 @@
     return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
-                                                                    const WorkloadInfo&          info) const
-{
-    return CreateConcat(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
 {
@@ -552,32 +513,6 @@
     return std::make_unique<NeonResizeWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
-    const ResizeBilinearQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    ResizeQueueDescriptor resizeDescriptor;
-    resizeDescriptor.m_Inputs  = descriptor.m_Inputs;
-    resizeDescriptor.m_Outputs = descriptor.m_Outputs;
-
-    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
-    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
-    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
-    return CreateResize(resizeDescriptor, info);
-}
-
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
-                                                            const WorkloadInfo &info) const
-{
-    IgnoreUnused(descriptor);
-
-    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
-    elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
-
-    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 41fc506..bd84c05 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -52,10 +52,6 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
-                                         const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
@@ -122,10 +118,6 @@
     std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                       const WorkloadInfo& Info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
@@ -138,10 +130,6 @@
     std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
@@ -172,10 +160,6 @@
     std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
-    std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
@@ -224,14 +208,6 @@
     std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
-    std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 1199f30..f51493d 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -73,14 +73,10 @@
         case DataType::QAsymmU8:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
             break;
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        case DataType::QuantizedSymm8PerAxis:
-            ARMNN_FALLTHROUGH;
         case DataType::QSymmS8:
         case DataType::QAsymmS8:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
             break;
-        ARMNN_NO_DEPRECATE_WARN_END
         case DataType::Signed32:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
             break;
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index c9f164e..a3060f0 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -58,27 +58,12 @@
     return std::make_unique<RefMemoryManager>();
 }
 
-IBackendInternal::Optimizations RefBackend::GetOptimizations() const
-{
-    return Optimizations{};
-}
-
 IBackendInternal::ILayerSupportSharedPtr RefBackend::GetLayerSupport() const
 {
     static ILayerSupportSharedPtr layerSupport{new RefLayerSupport};
     return layerSupport;
 }
 
-bool RefBackend::HasCapability(BackendCapability capabilityClass) const
-{
-    auto search = oldCpuRefCapabilities.find(capabilityClass);
-    if (search != oldCpuRefCapabilities.end())
-    {
-        return true;
-    }
-    return false;
-}
-
 OptimizationViews RefBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
 {
     OptimizationViews optimizationViews;
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 28c1591..c04bf43 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -47,7 +47,6 @@
     IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(
         const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling) override;
 
-    IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
 
     OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
@@ -60,8 +59,6 @@
     {
         return cpuRefCapabilities;
     };
-
-    bool HasCapability(BackendCapability capabilityClass) const override;
 };
 
 } // namespace armnn
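
With the enum-based HasCapability override gone, capability queries on CpuRef go through the options-based GetCapabilities(). A sketch, assuming the HasCapability helper from armnn/BackendHelper.hpp and an instantiated RefBackend; the capability name is illustrative:

    const BackendCapabilities& caps = refBackend.GetCapabilities();
    bool nonConstWeights = HasCapability("NonConstWeights", caps);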
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index c0ede67..b80aa99 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -58,15 +58,6 @@
 
 } // anonymous namespace
 
-bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output,
-                                     Optional<std::string&> reasonIfUnsupported) const
-{
-    return IsElementwiseUnarySupported(input,
-                                       output,
-                                       ElementwiseUnaryDescriptor(UnaryOperation::Abs),
-                                       reasonIfUnsupported);
-}
-
 bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
@@ -565,15 +556,12 @@
     const DataType inputType = input.GetDataType();
     if (IsQuantized8BitType(inputType))
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        std::array<DataType, 4> supportedWeightTypes =
+        std::array<DataType, 3> supportedWeightTypes =
         {
             DataType::QAsymmS8,
             DataType::QAsymmU8,
-            DataType::QSymmS8,
-            DataType::QuantizedSymm8PerAxis // deprecated
+            DataType::QSymmS8
         };
-        ARMNN_NO_DEPRECATE_WARN_END
 
         supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                       "Reference Convolution2d: weights type not supported for quantized input.");
@@ -769,15 +757,12 @@
     const DataType inputType = input.GetDataType();
     if (IsQuantized8BitType(inputType))
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        std::array<DataType, 4> supportedWeightTypes =
+        std::array<DataType, 3> supportedWeightTypes =
                 {
                         DataType::QAsymmS8,
                         DataType::QAsymmU8,
                         DataType::QSymmS8,
-                        DataType::QuantizedSymm8PerAxis // deprecated
                 };
-        ARMNN_NO_DEPRECATE_WARN_END
 
         supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                        "Reference DepthwiseConvolution2d: weights type not supported for "
@@ -977,18 +962,6 @@
     return supported;
 }
 
-bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
-                                       const TensorInfo& input1,
-                                       const TensorInfo& output,
-                                       Optional<std::string&> reasonIfUnsupported) const
-{
-    return IsComparisonSupported(input0,
-                                 input1,
-                                 output,
-                                 ComparisonDescriptor(ComparisonOperation::Equal),
-                                 reasonIfUnsupported);
-}
-
 bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                   const FakeQuantizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
@@ -1173,18 +1146,6 @@
     return supported;
 }
 
-bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
-                                         const TensorInfo& input1,
-                                         const TensorInfo& output,
-                                         Optional<std::string&> reasonIfUnsupported) const
-{
-    return IsComparisonSupported(input0,
-                                 input1,
-                                 output,
-                                 ComparisonDescriptor(ComparisonOperation::Greater),
-                                 reasonIfUnsupported);
-}
-
 bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
                                        Optional<std::string&> /*reasonIfUnsupported*/) const
 {
@@ -1523,14 +1484,6 @@
     return supported;
 }
 
-bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                                        const TensorInfo& output,
-                                        const MergerDescriptor& descriptor,
-                                        Optional<std::string&> reasonIfUnsupported) const
-{
-    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
-}
-
 bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
                                          const TensorInfo &output,
                                          Optional<std::string &> reasonIfUnsupported) const
@@ -1897,33 +1850,6 @@
         "Reference reshape: input type not supported.");
 }
 
-bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
-                                                const TensorInfo& output,
-                                                Optional<std::string&> reasonIfUnsupported) const
-{
-    bool supported = true;
-    std::array<DataType,6> supportedTypes =
-    {
-        DataType::BFloat16,
-        DataType::Float32,
-        DataType::Float16,
-        DataType::QAsymmS8,
-        DataType::QAsymmU8,
-        DataType::QSymmS16
-    };
-
-    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
-                                  "Reference ResizeBilinear: input type not supported");
-
-    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
-                                  "Reference ResizeBilinear: output type not supported");
-
-    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference ResizeBilinear: input and output types not matching");
-
-    return supported;
-}
-
 bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ResizeDescriptor& descriptor,
@@ -1953,16 +1879,6 @@
     return supported;
 }
 
-bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
-                                       const TensorInfo& output,
-                                       Optional<std::string&> reasonIfUnsupported) const
-{
-    return IsElementwiseUnarySupported(input,
-                                       output,
-                                       ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt),
-                                       reasonIfUnsupported);
-}
-
 bool RefLayerSupport::IsShapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
@@ -2101,28 +2017,6 @@
 }
 
 bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
-                                          const ViewsDescriptor& descriptor,
-                                          Optional<std::string&> reasonIfUnsupported) const
-{
-    IgnoreUnused(descriptor);
-    bool supported = true;
-    std::array<DataType,6> supportedTypes =
-    {
-        DataType::BFloat16,
-        DataType::Float32,
-        DataType::Float16,
-        DataType::QAsymmS8,
-        DataType::QAsymmU8,
-        DataType::QSymmS16
-    };
-
-    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
-                                  "Reference splitter: input type not supported");
-
-    return supported;
-}
-
-bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
@@ -2322,15 +2216,12 @@
     const DataType inputType = input.GetDataType();
     if (IsQuantized8BitType(inputType))
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        std::array<DataType, 4> supportedWeightTypes =
+        std::array<DataType, 3> supportedWeightTypes =
         {
             DataType::QAsymmS8,
             DataType::QAsymmU8,
-            DataType::QSymmS8,
-            DataType::QuantizedSymm8PerAxis //Deprecated
+            DataType::QSymmS8
         };
-        ARMNN_NO_DEPRECATE_WARN_END
 
         supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
                                       "Reference TransposeConvolution2d: weights type not supported for "
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 627418e..53d7907 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -12,11 +12,6 @@
 class RefLayerSupport : public LayerSupportBase
 {
 public:
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsAbsSupported(const TensorInfo& input,
-                        const TensorInfo& output,
-                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -147,12 +142,6 @@
                                      const ElementwiseUnaryDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsEqualSupported(const TensorInfo& input0,
-                          const TensorInfo& input1,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsFakeQuantizationSupported(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -179,12 +168,6 @@
                            const GatherDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
-    bool IsGreaterSupported(const TensorInfo& input0,
-                            const TensorInfo& input1,
-                            const TensorInfo& output,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -230,12 +213,6 @@
                          const MeanDescriptor& descriptor,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
-    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                           const TensorInfo& output,
-                           const MergerDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsMemCopySupported(const TensorInfo& input,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -301,20 +278,11 @@
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsResizeBilinearSupported(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsResizeSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const ResizeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
-    bool IsRsqrtSupported(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsShapeSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -340,11 +308,6 @@
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional())
                                  const override;
 
-    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
-    bool IsSplitterSupported(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsSplitterSupported(const TensorInfo& input,
                              const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                              const ViewsDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 18a5af2..75008bc 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -129,16 +129,6 @@
     return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
-    elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
-
-    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
 {
@@ -331,16 +321,6 @@
     return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-    ComparisonQueueDescriptor comparisonDescriptor;
-    comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
-
-    return CreateComparison(comparisonDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                                       const WorkloadInfo& info) const
 {
@@ -379,16 +359,6 @@
     return std::make_unique<RefGatherWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-    ComparisonQueueDescriptor comparisonDescriptor;
-    comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
-
-    return CreateComparison(comparisonDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
@@ -479,12 +449,6 @@
     return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
-{
-    return CreateConcat(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
 {
@@ -615,28 +579,6 @@
     return std::make_unique<RefResizeWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                                    const WorkloadInfo& info) const
-{
-    ResizeQueueDescriptor resizeDescriptor;
-    resizeDescriptor.m_Parameters.m_Method       = ResizeMethod::Bilinear;
-    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
-    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
-    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
-    return CreateResize(resizeDescriptor, info);
-}
-
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
-{
-    IgnoreUnused(descriptor);
-    ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
-    elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
-
-    return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
-}
-
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateShape(const ShapeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index d00d3ca..a85e8dd 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -66,10 +66,6 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
-                                         const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
@@ -139,10 +135,6 @@
     std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
@@ -158,10 +150,6 @@
     std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateComparison instead")
-    std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
@@ -192,10 +180,6 @@
     std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
-    std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
@@ -241,14 +225,6 @@
     std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
-    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
-    std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
-
-    ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
-    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
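
The declarations removed above were all deprecation shims for operator-level
entry points (Abs, Equal, Greater, Merger, ResizeBilinear, Rsqrt) whose generic
successors remain (CreateElementwiseUnary, CreateComparison, CreateConcat,
CreateResize). At the public graph API the two comparison cases now look
roughly like this; 'network' and the layer names are illustrative:

    #include <armnn/INetwork.hpp>
    #include <armnn/Descriptors.hpp>

    // Equal and Greater are both expressed through the Comparison layer.
    armnn::ComparisonDescriptor equalDesc(armnn::ComparisonOperation::Equal);
    armnn::IConnectableLayer* equalLayer = network->AddComparisonLayer(equalDesc, "equal");

    armnn::ComparisonDescriptor greaterDesc(armnn::ComparisonOperation::Greater);
    armnn::IConnectableLayer* greaterLayer = network->AddComparisonLayer(greaterDesc, "greater");
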
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 7d6c59a..f8169a6 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -88,7 +88,6 @@
         workloads/RefQuantizeWorkload.cpp \
         workloads/RefReduceWorkload.cpp \
         workloads/RefReshapeWorkload.cpp \
-        workloads/RefResizeBilinearWorkload.cpp \
         workloads/RefResizeWorkload.cpp \
         workloads/RefSliceWorkload.cpp \
         workloads/RefSoftmaxWorkload.cpp \
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index e169c03..5727291 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -148,8 +148,6 @@
     RefReduceWorkload.hpp
     RefReshapeWorkload.cpp
     RefReshapeWorkload.hpp
-    RefResizeBilinearWorkload.cpp
-    RefResizeBilinearWorkload.hpp
     RefResizeWorkload.cpp
     RefResizeWorkload.hpp
     RefShapeWorkload.hpp
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index cd0dc5d..c2a456b 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -67,13 +67,6 @@
 {
     switch(info.GetDataType())
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        case armnn::DataType::QuantizedSymm8PerAxis:
-        {
-            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
-            return std::make_unique<QSymm8PerAxisDecoder>(static_cast<const int8_t*>(data), info);
-        }
-        ARMNN_NO_DEPRECATE_WARN_END
         case DataType::QAsymmS8:
         {
             return std::make_unique<QASymmS8Decoder>(
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index a2d565e..a7be9e1 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -22,16 +22,6 @@
 {
     switch(info.GetDataType())
     {
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        case armnn::DataType::QuantizedSymm8PerAxis:
-        {
-            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
-            return std::make_unique<QSymm8PerAxisEncoder>(
-                static_cast<int8_t*>(data),
-                params.second,
-                params.first);
-        }
-        ARMNN_NO_DEPRECATE_WARN_END
         case armnn::DataType::QAsymmS8:
         {
             return std::make_unique<QASymmS8Encoder>(
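
With the QuantizedSymm8PerAxis cases gone from both switch statements, per-axis
symmetric int8 data is described as DataType::QSymmS8 with per-axis scales set
on the TensorInfo, and MakeDecoder/MakeEncoder select the per-axis
implementations from the info. A sketch, with the shape and scale values purely
illustrative:

    #include <armnn/Tensor.hpp>

    // Per-axis symmetric weights without the deprecated data type: one scale
    // per slice along the quantization dimension.
    armnn::TensorInfo weightsInfo({ 4, 3, 3, 16 }, armnn::DataType::QSymmS8);
    weightsInfo.SetQuantizationScales({ 0.1f, 0.2f, 0.15f, 0.25f });
    weightsInfo.SetQuantizationDim(armnn::Optional<unsigned int>(0));

    // MakeDecoder<float>(weightsInfo, data) / MakeEncoder<float>(weightsInfo, data)
    // detect weightsInfo.HasPerAxisQuantization() and return the per-axis variants.
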
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
deleted file mode 100644
index 2cf5888..0000000
--- a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefResizeBilinearWorkload.hpp"
-
-#include "RefWorkloadUtils.hpp"
-#include "Resize.hpp"
-#include "BaseIterator.hpp"
-#include "Profiling.hpp"
-
-#include "BaseIterator.hpp"
-#include "Decoders.hpp"
-#include "Encoders.hpp"
-
-namespace armnn
-{
-
-void RefResizeBilinearWorkload::Execute() const
-{
-    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefResizeBilinearWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
-{
-    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
-}
-
-void RefResizeBilinearWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearWorkload_Execute");
-
-    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
-
-    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, inputs[0]->Map());
-    Decoder<float> &decoder = *decoderPtr;
-    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, outputs[0]->Map());
-    Encoder<float> &encoder = *encoderPtr;
-
-    Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, armnn::ResizeMethod::Bilinear);
-}
-
-} //namespace armnn
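
Deleting this workload loses no functionality: RefResizeWorkload runs the same
Resize kernel, reading the method from the descriptor instead of hard-coding
Bilinear. Its inner call is, approximately (the surviving implementation may
pass additional flags such as align-corners):

    Resize(decoder, inputInfo, encoder, outputInfo,
           m_Data.m_Parameters.m_DataLayout,
           m_Data.m_Parameters.m_Method);
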
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp
deleted file mode 100644
index 5ada3d1..0000000
--- a/src/backends/reference/workloads/RefResizeBilinearWorkload.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-
-namespace armnn
-{
-
-class RefResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
-    using BaseWorkload<ResizeBilinearQueueDescriptor>::BaseWorkload;
-    void Execute() const override;
-    void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)  override;
-private:
-    void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
-};
-
-} //namespace armnn
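
At the network level, a bilinear resize goes through the single Resize layer
and its descriptor; a sketch, with 'network', the target size, layout and
layer name all illustrative:

    #include <armnn/INetwork.hpp>
    #include <armnn/Descriptors.hpp>

    armnn::ResizeDescriptor resizeDesc;
    resizeDesc.m_Method       = armnn::ResizeMethod::Bilinear;
    resizeDesc.m_TargetWidth  = 224;
    resizeDesc.m_TargetHeight = 224;
    resizeDesc.m_DataLayout   = armnn::DataLayout::NHWC;
    armnn::IConnectableLayer* resizeLayer = network->AddResizeLayer(resizeDesc, "resize");
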
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index ed3aa90..914137c 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -59,7 +59,6 @@
 #include "RefRankWorkload.hpp"
 #include "RefReduceWorkload.hpp"
 #include "RefReshapeWorkload.hpp"
-#include "RefResizeBilinearWorkload.hpp"
 #include "RefResizeWorkload.hpp"
 #include "RefShapeWorkload.hpp"
 #include "RefSliceWorkload.hpp"