IVGCVSW-6981 Remove deprecated code 22.05 [Post Release]
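
For downstream users, a minimal migration sketch (illustrative only, not part of this patch):
code that subclassed the removed ILayerVisitor and called INetwork::Accept() should instead
subclass IStrategy and call INetwork::ExecuteStrategy(). The ExecuteStrategy signature below
follows include/armnn/IStrategy.hpp; the class and variable names are made up for the example.

    #include <armnn/INetwork.hpp>
    #include <armnn/IStrategy.hpp>
    #include <armnn/Tensor.hpp>

    #include <iostream>
    #include <vector>

    // Illustrative strategy: a single callback for every layer type, replacing the
    // per-type VisitXxxLayer() functions of the removed ILayerVisitor.
    class LoggingStrategy : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* /*layer*/,
                             const armnn::BaseDescriptor& /*descriptor*/,
                             const std::vector<armnn::ConstTensor>& /*constants*/,
                             const char* name,
                             const armnn::LayerBindingId /*id*/) override
        {
            std::cout << "Visited layer: " << (name ? name : "<unnamed>") << std::endl;
        }
    };

    // Previously: network->Accept(visitor);
    // Now:
    //     LoggingStrategy strategy;
    //     network->ExecuteStrategy(strategy);

Similarly, the removed IsCapabilitySupported() and IBackendInternal::HasCapability() checks can be
replaced with the GetCapability() query that remains in include/armnn/BackendHelper.hpp, a
name-based lookup such as armnn::GetCapability("NonConstWeights", backendId).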

Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Change-Id: I9ccaefbe28ea572e9e2b4a2168574804667f7460
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 959675d..1fcadb0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -160,13 +160,11 @@
     include/armnn/backends/ILayerSupport.hpp
     include/armnn/backends/ICustomAllocator.hpp
     include/armnn/IAsyncExecutionCallback.hpp
-    include/armnn/ILayerVisitor.hpp
     include/armnn/INetwork.hpp
     include/armnn/IProfiler.hpp
     include/armnn/IRuntime.hpp
     include/armnn/IStrategy.hpp
     include/armnn/IWorkingMemHandle.hpp
-    include/armnn/LayerVisitorBase.hpp
     include/armnn/Logging.hpp
     include/armnn/LstmParams.hpp
     include/armnn/MemorySources.hpp
diff --git a/delegate/src/MultiLayerFacade.hpp b/delegate/src/MultiLayerFacade.hpp
index c0df47b..2fdfc70 100644
--- a/delegate/src/MultiLayerFacade.hpp
+++ b/delegate/src/MultiLayerFacade.hpp
@@ -96,26 +96,12 @@
         return m_FirstLayer->GetGuid();
     }
 
-    // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
-    // the deprecated ILayerVisitor which is used in the function.
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
-                                      "Accept function is deprecated. Use IStrategy in combination with "
-                                      "ExecuteStrategy instead, which is an ABI/API stable version of the "
-                                      "visitor pattern.",
-                                      "22.05")
-    virtual void Accept(armnn::ILayerVisitor& visitor) const override
-    {
-        // Do not expect this function to be used so not providing an implementation
-    }
-    ARMNN_NO_DEPRECATE_WARN_END
-
     virtual void ExecuteStrategy(armnn::IStrategy& strategy) const override
     {
         // Do not expect this function to be used so not providing an implementation
         // if an implementation is required and the chain contains more than two operators
         // would have to provide a way to record the intermediate layers so they could be
-        // visited... the same applies to the Accept method above and the BackendSelectionHint
+        // visited... the same applies to the BackendSelectionHint
         // below.
     }
 
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 87b99f0..09c7385 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -467,10 +467,6 @@
 Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
                                                             const armnn::BackendId& backend);
 
-/// Convenience function to check a capability on a backend
-ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetBackendCapability", "22.05")
-bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability);
-
 /// Returns the number of cached files if backend supports caching
 unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend);
 
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 7f46c6a..628d045 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -480,11 +480,7 @@
                && m_ConstantWeights == rhs.m_ConstantWeights;
     }
 
-    /// Get the number of views/inputs.
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use GetNumInputs instead", "22.05")
-    uint32_t GetNumViews() const;
-
-    /// Get the number of views/inputs.
+    /// Get the number of inputs.
     uint32_t GetNumInputs() const;
 
     /// Enable/disable bias.
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
deleted file mode 100644
index a0c782e..0000000
--- a/include/armnn/ILayerVisitor.hpp
+++ /dev/null
@@ -1,534 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/Deprecated.hpp>
-#include <armnn/DescriptorsFwd.hpp>
-#include <armnn/NetworkFwd.hpp>
-#include <armnn/Optional.hpp>
-#include <armnn/TensorFwd.hpp>
-#include <armnn/Types.hpp>
-
-namespace armnn
-{
-class ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable IStrategy instead.", "22.05") ILayerVisitor
-{
-protected:
-    ILayerVisitor() {}
-    virtual ~ILayerVisitor() {}
-
-public:
-
-    /// Function that an activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param activationDescriptor - ActivationDescriptor to configure the activation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitActivationLayer(const IConnectableLayer* layer,
-                                      const ActivationDescriptor& activationDescriptor,
-                                      const char* name = nullptr) = 0;
-
-    /// Function that an addition layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitAdditionLayer(const IConnectableLayer* layer,
-                                    const char* name = nullptr) = 0;
-
-    /// Function that an arg min max layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param argMinMaxDescriptor - ArgMinMaxDescriptor to configure the activation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitArgMinMaxLayer(const IConnectableLayer* layer,
-                                     const ArgMinMaxDescriptor& argMinMaxDescriptor,
-                                     const char* name = nullptr) = 0;
-
-    /// Function that a batch normalization layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param mean - Pre-calculated mean for each channel.
-    /// @param variance - Pre-calculated variance for each channel.
-    /// @param beta - Per-channel additive factor.
-    /// @param gamma - Per-channel multiplicative factor.
-    /// @param name - Optional name for the layer.
-    virtual void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
-                                              const BatchNormalizationDescriptor& desc,
-                                              const ConstTensor& mean,
-                                              const ConstTensor& variance,
-                                              const ConstTensor& beta,
-                                              const ConstTensor& gamma,
-                                              const char* name = nullptr) = 0;
-
-    /// Function that a batch to space ND layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param batchToSpaceNdDescriptor - Description of the layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
-                                          const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                          const char* name = nullptr) = 0;
-
-    /// Function a Comparison layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param comparisonDescriptor - Description of the layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitComparisonLayer(const IConnectableLayer* layer,
-                                      const ComparisonDescriptor& comparisonDescriptor,
-                                      const char* name = nullptr) = 0;
-
-    /// Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
-    ///                           process. Number of Views must be equal to the number of inputs, and their order
-    ///                           must match - e.g. first view corresponds to the first input, second view to the
-    ///                           second input, etc....
-    /// @param name - Optional name for the layer.
-    virtual void VisitConcatLayer(const IConnectableLayer* layer,
-                                  const OriginsDescriptor& concatDescriptor,
-                                  const char* name = nullptr) = 0;
-
-    /// Function a layer with no inputs and a single output, which always corresponds to
-    /// the passed in constant tensor should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param input - Tensor to be provided as the only output of the layer. The layer will maintain
-    ///                its own copy of the tensor data, meaning the memory referenced by @a input can
-    ///                be freed or reused after this function is called.
-    /// @param name - Optional name for the layer.
-    virtual void VisitConstantLayer(const IConnectableLayer* layer,
-                                    const ConstTensor& input,
-                                    const char* name = nullptr) = 0;
-
-    /// Function that a 2D convolution layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param convolution2dDescriptor - Description of the 2D convolution layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                         const Convolution2dDescriptor& convolution2dDescriptor,
-                                         const char* name = nullptr) = 0;
-
-    /// Function that a 2D convolution layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param convolution2dDescriptor - Description of the 2D convolution layer.
-    /// @param weights - Tensor for the weights data.
-    /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitConvolution2dLayer without ConstTensors")
-    virtual void VisitConvolution2dLayer(const IConnectableLayer* layer,
-                                         const Convolution2dDescriptor& convolution2dDescriptor,
-                                         const ConstTensor& weights,
-                                         const Optional<ConstTensor>& biases,
-                                         const char* name = nullptr) = 0;
-
-    /// Function a depth to space layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param depthToSpaceDescriptor - Parameters for the depth to space operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitDepthToSpaceLayer(const IConnectableLayer* layer,
-                                        const DepthToSpaceDescriptor& depthToSpaceDescriptor,
-                                        const char* name = nullptr) = 0;
-
-    /// Function that a 2D depthwise convolution layer with biases should call back to when its
-    /// Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                                  const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-                                                  const char* name = nullptr) = 0;
-
-    /// Function that a 2D depthwise convolution layer with biases should call back to when its
-    /// Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
-    /// @param weights - Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
-    /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG("Use VisitDepthwiseConvolution2dLayer without ConstTensors")
-    virtual void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
-                                                  const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-                                                  const ConstTensor& weights,
-                                                  const Optional<ConstTensor>& biases,
-                                                  const char* name = nullptr) = 0;
-
-    /// Function that a Dequantize layer should call back to when its
-    /// Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitDequantizeLayer(const IConnectableLayer* layer,
-                                      const char* name = nullptr) = 0;
-
-    /// Function that a Detection PostProcess layer should call back to when its
-    /// Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param descriptor - Description of the Detection PostProcess layer.
-    /// @param anchors - Tensor for the anchors.
-    /// @param name - Optional name for the layer.
-    virtual void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
-                                                const DetectionPostProcessDescriptor& descriptor,
-                                                const ConstTensor& anchors,
-                                                const char* name = nullptr) = 0;
-
-    /// Function a division layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitDivisionLayer(const IConnectableLayer* layer,
-                                    const char* name = nullptr) = 0;
-
-    /// Function a ElementwiseUnary layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param elementwiseUnaryDescriptor - Description of the layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitElementwiseUnaryLayer(const IConnectableLayer* layer,
-                                            const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
-                                            const char* name = nullptr) = 0;
-
-    /// Function a fill layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param fillDescriptor - Description of the layer
-    /// @param name - Optional name for the layer.
-    virtual void VisitFillLayer(const IConnectableLayer* layer,
-                                const FillDescriptor& fillDescriptor,
-                                const char* name = nullptr) = 0;
-
-    /// Function a floor layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitFloorLayer(const IConnectableLayer* layer,
-                                 const char* name = nullptr) = 0;
-
-
-    /// Function that a fully connected layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param fullyConnectedDescriptor - Description of the fully connected layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
-                                          const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                          const char* name = nullptr) = 0;
-
-    /// Function that a fully connected layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param fullyConnectedDescriptor - Description of the fully connected layer.
-    /// @param weights - Tensor for the weights data.
-    /// @param biases - Optional tensor for the bias data.
-    /// @param name - Optional name for the layer.
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use VisitFullyConnectedLayer without ConstTensors", "22.05")
-    virtual void VisitFullyConnectedLayer(const IConnectableLayer* layer,
-                                          const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                          const ConstTensor& weights,
-                                          const Optional<ConstTensor>& biases,
-                                          const char* name = nullptr) = 0;
-
-    /// Function a Gather layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param gatherDescriptor - Parameters for the gather operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitGatherLayer(const IConnectableLayer* layer,
-                                  const GatherDescriptor& gatherDescriptor,
-                                  const char* name = nullptr) = 0;
-
-    /// Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified
-    ///             when passing the inputs to the IRuntime::EnqueueWorkload() function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitInputLayer(const IConnectableLayer* layer,
-                                 LayerBindingId id,
-                                 const char* name = nullptr) = 0;
-
-    /// Function that an instance normalization layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param desc - Parameters for the instance normalization operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitInstanceNormalizationLayer(const IConnectableLayer* layer,
-                                                 const InstanceNormalizationDescriptor& desc,
-                                                 const char* name = nullptr) = 0;
-
-    /// Function that an L2 normalization layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked. Normalization is performed along dimension 1, but requires a 4d input.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param desc - Parameters for the L2 normalization operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitL2NormalizationLayer(const IConnectableLayer* layer,
-                                           const L2NormalizationDescriptor& desc,
-                                           const char* name = nullptr) = 0;
-
-    /// Function that a log softmax layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param logSoftmaxDescriptor - LogSoftmaxDescriptor to configure the log softmax.
-    /// @param name - Optional name for the layer.
-    virtual void VisitLogSoftmaxLayer(const IConnectableLayer* layer,
-                                      const LogSoftmaxDescriptor& logSoftmaxDescriptor,
-                                      const char* name = nullptr) = 0;
-
-    /// Function that a logical binary layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param logicalBinaryDescriptor - LogicalBinaryDescriptor to configure the logical unary layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitLogicalBinaryLayer(const IConnectableLayer* layer,
-                                         const LogicalBinaryDescriptor& logicalBinaryDescriptor,
-                                         const char* name = nullptr) = 0;
-
-    /// Function an Lstm layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param descriptor - Parameters controlling the operation of the Lstm operation.
-    /// @param params - The weights and biases for the LSTM cell.
-    /// @param name - Optional name for the layer.
-    virtual void VisitLstmLayer(const IConnectableLayer* layer,
-                                const LstmDescriptor& descriptor,
-                                const LstmInputParams& params,
-                                const char* name = nullptr) = 0;
-
-    /// Function a Maximum layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitMaximumLayer(const IConnectableLayer* layer,
-                                   const char* name = nullptr) = 0;
-
-    /// Function a Mean layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param meanDescriptor - Parameters for the mean operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitMeanLayer(const IConnectableLayer* layer,
-                                const MeanDescriptor& meanDescriptor,
-                                const char* name = nullptr) = 0;
-
-    /// Function that a merge layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitMergeLayer(const IConnectableLayer* layer,
-                                 const char* name = nullptr) = 0;
-
-    /// Function a Minimum layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitMinimumLayer(const IConnectableLayer* layer,
-                                   const char* name = nullptr) = 0;
-
-    /// Function that a multiplication layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitMultiplicationLayer(const IConnectableLayer* layer,
-                                          const char* name = nullptr) = 0;
-
-    /// Function that a normalization layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param normalizationDescriptor - NormalizationDescriptor to configure the normalization.
-    /// @param name - Optional name for the layer.
-    virtual void VisitNormalizationLayer(const IConnectableLayer* layer,
-                                         const NormalizationDescriptor& normalizationDescriptor,
-                                         const char* name = nullptr) = 0;
-
-    /// Function an output layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param id - User generated id to uniquely identify a particular output. The same id needs to be specified
-    /// when passing the outputs to the IRuntime::EnqueueWorkload() function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitOutputLayer(const IConnectableLayer* layer,
-                                  LayerBindingId id,
-                                  const char* name = nullptr) = 0;
-
-    /// Function a pad layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param paddings - n by 2 tensor, where n is the rank of the input tensor,
-    ///                   such that paddings[i,0] indicates the amount of padding to add in front of dimension i, and
-    ///                   paddings[i,1] indicates the amount of padding to add after the end of dimension i
-    /// @param name - Optional name for the layer.
-    virtual void VisitPadLayer(const IConnectableLayer* layer,
-                               const PadDescriptor& padDescriptor,
-                               const char* name = nullptr) = 0;
-
-    /// Function that a permute layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param permuteDescriptor - PermuteDescriptor to configure the permute.
-    /// @param name - Optional name for the layer.
-    virtual void VisitPermuteLayer(const IConnectableLayer* layer,
-                                   const PermuteDescriptor& permuteDescriptor,
-                                   const char* name = nullptr) = 0;
-
-    /// Function that a pooling layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
-    /// @param name - Optional name for the layer.
-    virtual void VisitPooling2dLayer(const IConnectableLayer* layer,
-                                     const Pooling2dDescriptor& pooling2dDescriptor,
-                                     const char* name = nullptr) = 0;
-
-    /// Function that a pooling layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param pooling3dDescriptor - Pooling3dDescriptor to configure the pooling.
-    /// @param name - Optional name for the layer.
-    virtual void VisitPooling3dLayer(const IConnectableLayer* layer,
-                                     const Pooling3dDescriptor& pooling3dDescriptor,
-                                     const char* name = nullptr) = 0;
-
-    /// Function that a PReLU activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitPreluLayer(const IConnectableLayer* layer,
-                                 const char* name = nullptr) = 0;
-
-    /// Function a quantize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitQuantizeLayer(const IConnectableLayer* layer,
-                                    const char* name = nullptr) = 0;
-
-    /// Function a QLstm layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param descriptor - Parameters controlling the operation of the QLstm operation.
-    /// @param params - The weights and biases for the layer
-    /// @param name - Optional name for the layer.
-    virtual void VisitQLstmLayer(const IConnectableLayer* layer,
-                                 const QLstmDescriptor& descriptor,
-                                 const LstmInputParams& params,
-                                 const char* name = nullptr) = 0;
-
-    /// Function a QuantizedLstm layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param params - The weights and biases for the Quantized LSTM cell
-    /// @param name - Optional name for the layer.
-    virtual void VisitQuantizedLstmLayer(const IConnectableLayer* layer,
-                                         const QuantizedLstmInputParams& params,
-                                         const char* name = nullptr) = 0;
-
-    /// Function a rank layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitRankLayer(const IConnectableLayer* layer,
-                                const char* name = nullptr) = 0;
-
-    /// Function that a reduce layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param ReduceDescriptor - Parameters for the reduce max operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitReduceLayer(const IConnectableLayer* layer,
-                                  const ReduceDescriptor& reduceDescriptor,
-                                  const char* name = nullptr) = 0;
-
-    /// Function a reshape layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param reshapeDescriptor - Parameters for the reshape operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitReshapeLayer(const IConnectableLayer* layer,
-                                   const ReshapeDescriptor& reshapeDescriptor,
-                                   const char* name = nullptr) = 0;
-
-    /// Function that a resize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param resizeDescriptor - Parameters for the resize operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitResizeLayer(const IConnectableLayer* layer,
-                                  const ResizeDescriptor& resizeDescriptor,
-                                  const char* name = nullptr) = 0;
-
-    /// Function that a slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitSliceLayer(const IConnectableLayer* layer,
-                                 const SliceDescriptor& sliceDescriptor,
-                                 const char* name = nullptr) = 0;
-
-
-    /// Function that a softmax layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
-    /// @param name - Optional name for the layer.
-    virtual void VisitSoftmaxLayer(const IConnectableLayer* layer,
-                                   const SoftmaxDescriptor& softmaxDescriptor,
-                                   const char* name = nullptr) = 0;
-
-    /// Function a space to batch layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param spaceToBatchNdDescriptor - Parameters for the space to batch operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
-                                          const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                          const char* name = nullptr) = 0;
-
-    /// Function a space to depth layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitSpaceToDepthLayer(const IConnectableLayer* layer,
-                                        const SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                        const char* name = nullptr) = 0;
-
-    /// Function that a splitter layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
-    ///                             Number of Views must be equal to the number of outputs,
-    ///                             and their order must match - e.g. first view corresponds to
-    ///                             the first output, second view to the second output, etc....
-    /// @param name - Optional name for the layer.
-    virtual void VisitSplitterLayer(const IConnectableLayer* layer,
-                                    const ViewsDescriptor& splitterDescriptor,
-                                    const char* name = nullptr) = 0;
-
-    /// Function a stack layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param stackDescriptor - Parameters for the stack operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitStackLayer(const IConnectableLayer* layer,
-                                 const StackDescriptor& stackDescriptor,
-                                 const char* name = nullptr) = 0;
-
-    /// Function a StandInLayer should call back to when its Accept(ILaterVisitor&) function is invoked
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param standInDescriptor - Parameters for the stand-in layer.
-    /// @param name - Optional name for the layer.
-    virtual void VisitStandInLayer(const IConnectableLayer* layer,
-                                   const StandInDescriptor& standInDescriptor,
-                                   const char* name = nullptr) = 0;
-
-    /// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
-    /// @param name - Optional name for the layer.
-    virtual void VisitStridedSliceLayer(const IConnectableLayer* layer,
-                                        const StridedSliceDescriptor& stridedSliceDescriptor,
-                                        const char* name = nullptr) = 0;
-
-    /// Function a subtraction layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitSubtractionLayer(const IConnectableLayer* layer,
-                                       const char* name = nullptr) = 0;
-
-    /// Function a switch layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param name - Optional name for the layer.
-    virtual void VisitSwitchLayer(const IConnectableLayer* layer,
-                                  const char* name = nullptr) = 0;
-
-    /// Function that a 2D transpose convolution layer should call back to when its Accept(ILayerVisitor&)
-    /// function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param descriptor - Description of the 2D transpose convolution layer.
-    /// @param weights - Tensor for the weights data.
-    /// @param biases - Optional tensor for the bias data.
-    /// @param name - Optional name for the layer.
-    virtual void VisitTransposeConvolution2dLayer(const IConnectableLayer* layer,
-                                                  const TransposeConvolution2dDescriptor& descriptor,
-                                                  const ConstTensor& weights,
-                                                  const Optional<ConstTensor>& biases,
-                                                  const char* name = nullptr) = 0;
-
-    /// Function that a transpose  layer should call back to when its Accept(ILayerVisitor&) function is invoked.
-    /// @param layer - pointer to the layer which is calling back to this visit function.
-    /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
-    /// @param name - Optional name for the layer.
-    virtual void VisitTransposeLayer(const IConnectableLayer* layer,
-                                     const TransposeDescriptor& transposeDescriptor,
-                                     const char* name = nullptr) = 0;
-
-    virtual void StartVisit() {}
-    virtual void FinishVisit() {}
-
-};
-} // namespace armnn
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1ed5600..94afbf3 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -7,7 +7,6 @@
 #include <armnn/BackendOptions.hpp>
 #include <armnn/Deprecated.hpp>
 #include <armnn/DescriptorsFwd.hpp>
-#include <armnn/ILayerVisitor.hpp>
 #include <armnn/IStrategy.hpp>
 #include <armnn/NetworkFwd.hpp>
 #include <armnn/Optional.hpp>
@@ -95,18 +94,6 @@
     /// Returns the unique id of the layer
     virtual LayerGuid GetGuid() const = 0;
 
-    // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
-    // the deprecated ILayerVisitor which is used in the function.
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    /// Apply a visitor to this layer
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
-                                      "Accept function is deprecated. Use IStrategy in combination with "
-                                      "ExecuteStrategy instead, which is an ABI/API stable version of the "
-                                      "visitor pattern.",
-                                      "22.05")
-    virtual void Accept(ILayerVisitor& visitor) const = 0;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     /// Apply a visitor to this layer
     virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
 
@@ -764,17 +751,6 @@
     IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
                                               const char* name = nullptr);
 
-    // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
-    // the deprecated ILayerVisitor which is used in the function.
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    /// Apply a visitor to this layer
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
-                                      "Accept function is deprecated. Use IStrategy in combination with "
-                                      "ExecuteStrategy instead, which is an ABI/API stable version of the "
-                                      "visitor pattern.",
-                                      "22.05")
-    void Accept(ILayerVisitor& visitor) const;
-    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const;
 
diff --git a/include/armnn/IStrategy.hpp b/include/armnn/IStrategy.hpp
index 8d29565..aed881c 100644
--- a/include/armnn/IStrategy.hpp
+++ b/include/armnn/IStrategy.hpp
@@ -7,6 +7,9 @@
 #include <armnn/DescriptorsFwd.hpp>
 #include <armnn/Types.hpp>
 
+#include <armnn/NetworkFwd.hpp>
+#include <armnn/TensorFwd.hpp>
+
 namespace armnn
 {
 
@@ -17,7 +20,7 @@
 virtual ~IStrategy() {}
 
 public:
-virtual void ExecuteStrategy(const armnn::IConnectableLayer* layer,
+virtual void ExecuteStrategy(const IConnectableLayer* layer,
                              const armnn::BaseDescriptor& descriptor,
                              const std::vector<armnn::ConstTensor>& constants,
                              const char* name,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
deleted file mode 100644
index 025fca7..0000000
--- a/include/armnn/LayerVisitorBase.hpp
+++ /dev/null
@@ -1,271 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/ILayerVisitor.hpp>
-
-namespace armnn
-{
-
-struct VisitorThrowingPolicy
-{
-    static void Apply(const std::string& errorMessage = "") { throw UnimplementedException(errorMessage); }
-};
-
-struct VisitorNoThrowPolicy
-{
-    static void Apply(const std::string&) {}
-};
-
-/// Visitor base class with empty implementations.
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-template<typename DefaultPolicy>
-class LayerVisitorBase : public ILayerVisitor
-{
-protected:
-    LayerVisitorBase() {}
-    virtual ~LayerVisitorBase() {}
-
-public:
-
-    void VisitActivationLayer(const IConnectableLayer*,
-                              const ActivationDescriptor&,
-                              const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitAdditionLayer(const IConnectableLayer*,
-                            const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitArgMinMaxLayer(const IConnectableLayer*,
-                             const ArgMinMaxDescriptor&,
-                             const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitBatchNormalizationLayer(const IConnectableLayer*,
-                                      const BatchNormalizationDescriptor&,
-                                      const ConstTensor&,
-                                      const ConstTensor&,
-                                      const ConstTensor&,
-                                      const ConstTensor&,
-                                      const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitBatchToSpaceNdLayer(const IConnectableLayer*,
-                                  const BatchToSpaceNdDescriptor&,
-                                  const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitComparisonLayer(const IConnectableLayer*,
-                              const ComparisonDescriptor&,
-                              const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitConcatLayer(const IConnectableLayer*,
-                          const ConcatDescriptor&,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitConstantLayer(const IConnectableLayer*,
-                            const ConstTensor&,
-                            const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitConvolution2dLayer(const IConnectableLayer*,
-                                 const Convolution2dDescriptor&,
-                                 const ConstTensor&,
-                                 const Optional<ConstTensor>&,
-                                 const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitConvolution2dLayer(const IConnectableLayer*,
-                                 const Convolution2dDescriptor&,
-                                 const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitDepthToSpaceLayer(const IConnectableLayer*,
-                                const DepthToSpaceDescriptor&,
-                                const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitDepthwiseConvolution2dLayer(const IConnectableLayer*,
-                                          const DepthwiseConvolution2dDescriptor&,
-                                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitDepthwiseConvolution2dLayer(const IConnectableLayer*,
-                                          const DepthwiseConvolution2dDescriptor&,
-                                          const ConstTensor&,
-                                          const Optional<ConstTensor>&,
-                                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitDequantizeLayer(const IConnectableLayer*,
-                              const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitDetectionPostProcessLayer(const IConnectableLayer*,
-                                        const DetectionPostProcessDescriptor&,
-                                        const ConstTensor&,
-                                        const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitDivisionLayer(const IConnectableLayer*,
-                            const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitElementwiseUnaryLayer(const IConnectableLayer*,
-                                    const ElementwiseUnaryDescriptor&,
-                                    const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitFillLayer(const IConnectableLayer*,
-                        const FillDescriptor&,
-                        const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitFloorLayer(const IConnectableLayer*,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitFullyConnectedLayer(const IConnectableLayer*,
-                                  const FullyConnectedDescriptor&,
-                                  const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitFullyConnectedLayer(const IConnectableLayer*,
-                                  const FullyConnectedDescriptor&,
-                                  const ConstTensor&,
-                                  const Optional<ConstTensor>&,
-                                  const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitGatherLayer(const IConnectableLayer*,
-                          const GatherDescriptor&,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitInputLayer(const IConnectableLayer*,
-                         LayerBindingId,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitInstanceNormalizationLayer(const IConnectableLayer*,
-                                         const InstanceNormalizationDescriptor&,
-                                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitL2NormalizationLayer(const IConnectableLayer*,
-                                   const L2NormalizationDescriptor&,
-                                   const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitLogSoftmaxLayer(const IConnectableLayer*,
-                              const LogSoftmaxDescriptor&,
-                              const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitLogicalBinaryLayer(const IConnectableLayer*,
-                                 const LogicalBinaryDescriptor&,
-                                 const char*) override {DefaultPolicy::Apply(__func__); }
-
-    void VisitLstmLayer(const IConnectableLayer*,
-                        const LstmDescriptor&,
-                        const LstmInputParams&,
-                        const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitMaximumLayer(const IConnectableLayer*,
-                           const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitMeanLayer(const IConnectableLayer*,
-                        const MeanDescriptor&,
-                        const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitMergeLayer(const IConnectableLayer*,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitMinimumLayer(const IConnectableLayer*,
-                           const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitMultiplicationLayer(const IConnectableLayer*,
-                                  const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitNormalizationLayer(const IConnectableLayer*,
-                                 const NormalizationDescriptor&,
-                                 const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitOutputLayer(const IConnectableLayer*,
-                          LayerBindingId,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitPadLayer(const IConnectableLayer*,
-                       const PadDescriptor&,
-                       const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitPermuteLayer(const IConnectableLayer*,
-                           const PermuteDescriptor&,
-                           const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitPooling2dLayer(const IConnectableLayer*,
-                             const Pooling2dDescriptor&,
-                             const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitPreluLayer(const IConnectableLayer*,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitQuantizeLayer(const IConnectableLayer*,
-                            const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitQLstmLayer(const IConnectableLayer*,
-                         const QLstmDescriptor&,
-                         const LstmInputParams&,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitQuantizedLstmLayer(const IConnectableLayer*,
-                                 const QuantizedLstmInputParams&,
-                                 const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitRankLayer(const IConnectableLayer*,
-                        const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitReduceLayer(const IConnectableLayer*,
-                          const ReduceDescriptor&,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitReshapeLayer(const IConnectableLayer*,
-                           const ReshapeDescriptor&,
-                           const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitResizeLayer(const IConnectableLayer*,
-                          const ResizeDescriptor&,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitSliceLayer(const IConnectableLayer*,
-                         const SliceDescriptor&,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitSoftmaxLayer(const IConnectableLayer*,
-                           const SoftmaxDescriptor&,
-                           const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitSpaceToBatchNdLayer(const IConnectableLayer*,
-                                  const SpaceToBatchNdDescriptor&,
-                                  const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitSpaceToDepthLayer(const IConnectableLayer*,
-                                const SpaceToDepthDescriptor&,
-                                const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitSplitterLayer(const IConnectableLayer*,
-                            const ViewsDescriptor&,
-                            const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitStackLayer(const IConnectableLayer*,
-                         const StackDescriptor&,
-                         const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitStandInLayer(const IConnectableLayer*,
-                           const StandInDescriptor&,
-                           const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitStridedSliceLayer(const IConnectableLayer*,
-                                const StridedSliceDescriptor&,
-                                const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitSubtractionLayer(const IConnectableLayer*,
-                               const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitSwitchLayer(const IConnectableLayer*,
-                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitTransposeConvolution2dLayer(const IConnectableLayer*,
-                                          const TransposeConvolution2dDescriptor&,
-                                          const ConstTensor&,
-                                          const Optional<ConstTensor>&,
-                                          const char*) override { DefaultPolicy::Apply(__func__); }
-
-    void VisitTransposeLayer(const IConnectableLayer*,
-                             const TransposeDescriptor&,
-                             const char*) override { DefaultPolicy::Apply(__func__); }
-
-};
-ARMNN_NO_DEPRECATE_WARN_END
-
-} // namespace armnn
diff --git a/include/armnn/backends/CMakeLists.txt b/include/armnn/backends/CMakeLists.txt
index 19046ed..978916e 100644
--- a/include/armnn/backends/CMakeLists.txt
+++ b/include/armnn/backends/CMakeLists.txt
@@ -4,7 +4,6 @@
 #
 
 list(APPEND armnnBackendsAPI_sources
-     CpuTensorHandleFwd.hpp
      TensorHandleFwd.hpp
      DynamicBackend.hpp
      IBackendInternal.hpp
diff --git a/include/armnn/backends/CpuTensorHandleFwd.hpp b/include/armnn/backends/CpuTensorHandleFwd.hpp
deleted file mode 100644
index a5a28d8..0000000
--- a/include/armnn/backends/CpuTensorHandleFwd.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-// This file is deprecated and will be removed soon.
-// Please use the new file include/armnn/TensorHandleFwd.hpp instead.
-
-#pragma once
-
-namespace armnn
-{
-
-class ConstCpuTensorHandle;
-class CpuTensorHandle;
-class ScopedCpuTensorHandle;
-class PassthroughCpuTensorHandle;
-class ConstPassthroughCpuTensorHandle;
-
-} // namespace armnn
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 98f0eaa..e393a7e 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -172,10 +172,6 @@
         return BackendCapabilities("IBackendInternal NullCapabilities");
     };
 
-    /// Returns true if backend support the capability false otherwise
-    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated in favour of GetCapability", "22.05")
-    virtual bool HasCapability(BackendCapability /*capabilityClass*/) const { return false; }
-
     /// Signals the backend to use a custom memory allocator provided by the user
     ///
     /// \param allocator - a pointer to the provided ICustomAllocator to use with this backend
diff --git a/include/armnn/backends/TensorHandle.hpp b/include/armnn/backends/TensorHandle.hpp
index 2e6c848..c69f7c8 100644
--- a/include/armnn/backends/TensorHandle.hpp
+++ b/include/armnn/backends/TensorHandle.hpp
@@ -251,17 +251,4 @@
     std::shared_ptr<ConstTensorHandle> m_TensorHandle;
 };
 
-using ConstCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstCpuTensorHandle is deprecated, "
-                                                "use ConstTensorHandle instead", "22.05") = ConstTensorHandle;
-using CpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("CpuTensorHandle is deprecated, "
-                                           "use TensorHandle instead", "22.05") = TensorHandle;
-using ScopedCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ScopedCpuTensorHandle is deprecated, "
-                                                 "use ScopedTensorHandle instead", "22.05") = ScopedTensorHandle;
-using PassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("PassthroughCpuTensorHandle is deprecated, use "
-                                                      "PassthroughTensorHandle instead",
-                                                      "22.05") = PassthroughTensorHandle;
-using ConstPassthroughCpuTensorHandle ARMNN_DEPRECATED_MSG_REMOVAL_DATE("ConstPassthroughCpuTensorHandle is "
-                                                           "deprecated, use ConstPassthroughTensorHandle "
-                                                           "instead", "22.05") = ConstPassthroughTensorHandle;
-
 } // namespace armnn
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index a5278eb..5b5bece 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -110,22 +110,6 @@
     return false;
 }
 
-/// Convenience function to check a capability on a backend
-bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability)
-{
-    bool hasCapability = false;
-    auto const& backendRegistry = armnn::BackendRegistryInstance();
-    if (backendRegistry.IsBackendRegistered(backend))
-    {
-        auto factoryFunc = backendRegistry.GetFactory(backend);
-        auto backendObject = factoryFunc();
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        hasCapability = backendObject->HasCapability(capability);
-        ARMNN_NO_DEPRECATE_WARN_END
-    }
-    return hasCapability;
-}
-
 unsigned int GetNumberOfCacheFiles(const armnn::BackendId& backend)
 {
     auto const& backendRegistry = armnn::BackendRegistryInstance();
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 4eb875e..c740fd0 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -450,11 +450,6 @@
     return armnn::GetNumInputs(m_BiasEnabled);
 }
 
-uint32_t FullyConnectedDescriptor::GetNumViews() const
-{
-    return armnn::GetNumInputs(m_BiasEnabled);
-}
-
 uint32_t DepthwiseConvolution2dDescriptor::GetNumInputs() const
 {
     return armnn::GetNumInputs(m_BiasEnabled);
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 5edf66c..12c782c 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -476,4 +476,4 @@
     LayerBindingId m_Id;
 };
 
-}
+} // namespace armnn
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9520c13..8fe4445 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -456,13 +456,6 @@
     return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void INetwork::Accept(ILayerVisitor& visitor) const
-{
-    return pNetworkImpl->Accept(visitor);
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void INetwork::ExecuteStrategy(IStrategy& strategy) const
 {
     return pNetworkImpl->ExecuteStrategy(strategy);
@@ -2909,16 +2902,6 @@
     return layer;
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void NetworkImpl::Accept(ILayerVisitor& visitor) const
-{
-    for (auto layer : GetGraph())
-    {
-        layer->Accept(visitor);
-    };
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2d34cfc..a4387e6 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -253,10 +253,6 @@
 
     IConnectableLayer* AddConvertFp32ToFp16Layer(const char* name = nullptr);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const;
 
 private:
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 13fa24a..072d13c 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -47,11 +47,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void AbsLayer::Accept(ILayerVisitor& visitor) const
+void AbsLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitAbsLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
\ No newline at end of file
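Migration note (illustrative only, not part of the diff): the change above replaces the removed Accept/ILayerVisitor entry points with ExecuteStrategy/IStrategy. A minimal sketch of a client-side strategy, assuming the ExecuteStrategy signature declared in include/armnn/IStrategy.hpp; the LoggingStrategy name is hypothetical:

    #include <armnn/INetwork.hpp>
    #include <armnn/IStrategy.hpp>
    #include <iostream>
    #include <vector>

    // Logs each layer encountered while traversing the network, roughly what a
    // simple ILayerVisitor implementation used to do via INetwork::Accept.
    class LoggingStrategy : public armnn::IStrategy
    {
    public:
        void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                             const armnn::BaseDescriptor& /*descriptor*/,
                             const std::vector<armnn::ConstTensor>& /*constants*/,
                             const char* name,
                             const armnn::LayerBindingId /*id*/) override
        {
            std::cout << "Layer: " << (name ? name : layer->GetName()) << std::endl;
        }
    };

    // Usage, replacing the removed network->Accept(visitor):
    //     LoggingStrategy strategy;
    //     network->ExecuteStrategy(strategy);
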
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index 9ab6662..250bd8a 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -28,10 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create an AbsLayer.
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 63c98a9..eea18d7 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -45,11 +45,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ActivationLayer::Accept(ILayerVisitor& visitor) const
+void ActivationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitActivationLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 47b7f66..8d1196f 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -26,10 +26,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create an ActivationLayer.
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index f55bb55e..7117c14 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -32,11 +32,9 @@
     return CloneBase<AdditionLayer>(graph, GetName());
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void AdditionLayer::Accept(ILayerVisitor& visitor) const
+void AdditionLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitAdditionLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 71a8553..6980677 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -23,9 +23,7 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     AdditionLayer* Clone(Graph& graph) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create an AdditionLayer.
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 30db7ba..57a6ff1 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -86,11 +86,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
+void ArgMinMaxLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index f212536..7a6b783 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -34,9 +34,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ArgMinMaxLayer.
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 15a42dd..6f0e1a8 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -71,24 +71,6 @@
     return {m_Mean, m_Variance, m_Beta, m_Gamma};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
-{
-    ManagedConstTensorHandle managedMean(m_Mean);
-    ManagedConstTensorHandle managedVariance(m_Variance);
-    ManagedConstTensorHandle managedBeta(m_Beta);
-    ManagedConstTensorHandle managedGamma(m_Gamma);
-
-    ConstTensor meanTensor(managedMean.GetTensorInfo(), managedMean.Map());
-    ConstTensor varianceTensor(managedVariance.GetTensorInfo(), managedVariance.Map());
-    ConstTensor betaTensor(managedBeta.GetTensorInfo(), managedBeta.Map());
-    ConstTensor gammaTensor(managedGamma.GetTensorInfo(), managedGamma.Map());
-
-    visitor.VisitBatchNormalizationLayer(
-            this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     ManagedConstTensorHandle managedMean(m_Mean);
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 10ca7ec..9715c56 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -39,10 +39,6 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 485500d..f022c52 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -95,11 +95,9 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void BatchToSpaceNdLayer::Accept(ILayerVisitor& visitor) const
+void BatchToSpaceNdLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitBatchToSpaceNdLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index bb6eb71..a2c480a 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -34,9 +34,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a BatchToSpaceNdLayer.
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index 03b6865..efa74c8 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -46,12 +46,4 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "CastLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void CastLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("CastLayer VisitCastLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 } // namespace armnn
diff --git a/src/armnn/layers/CastLayer.hpp b/src/armnn/layers/CastLayer.hpp
index e044813..e01e91c 100644
--- a/src/armnn/layers/CastLayer.hpp
+++ b/src/armnn/layers/CastLayer.hpp
@@ -28,10 +28,6 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
 protected:
     /// Constructor to create a CastLayer.
     CastLayer(const char *name);
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index 78a2393..33ea70e 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -47,11 +47,4 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ChannelShuffleLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("ChannelShuffleLayer: VisitChannelShuffleLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
 }
\ No newline at end of file
diff --git a/src/armnn/layers/ChannelShuffleLayer.hpp b/src/armnn/layers/ChannelShuffleLayer.hpp
index 903d161..79ab426 100644
--- a/src/armnn/layers/ChannelShuffleLayer.hpp
+++ b/src/armnn/layers/ChannelShuffleLayer.hpp
@@ -11,9 +11,6 @@
 class ChannelShuffleLayer : public LayerWithParameters<ChannelShuffleDescriptor>
 {
 public:
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
 
     /// Creates a dynamically-allocated copy of this layer.
     /// @param graph The graph into which this layer is being cloned
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index cf16386..b6cd48b 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -74,11 +74,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ComparisonLayer::Accept(ILayerVisitor& visitor) const
+void ComparisonLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitComparisonLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index 07534af..7361c6b 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -35,9 +35,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ComparisonLayer
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index b59e0b9..69660dd 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -318,11 +318,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConcatLayer::Accept(ILayerVisitor& visitor) const
+void ConcatLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitConcatLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index fefedea..db971ed 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -44,9 +44,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ConcatLayer.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 0c06dd5..aee95d0 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -62,15 +62,6 @@
                outShape);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConstantLayer::Accept(ILayerVisitor& visitor) const
-{
-    ManagedConstTensorHandle managedLayerOutput(m_LayerOutput);
-    ConstTensor layerOutputTensor(managedLayerOutput.GetTensorInfo(), managedLayerOutput.Map());
-    visitor.VisitConstantLayer(this, layerOutputTensor, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void ConstantLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     ManagedConstTensorHandle managedLayerOutput(m_LayerOutput);
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index d3dd8cf..f5ab546 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -39,10 +39,6 @@
     /// Free up the constant source data stored by the layer.
     void ReleaseConstantData() override {}
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
     std::shared_ptr<ConstTensorHandle> m_LayerOutput;
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 6d843f3..d7ad692 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -47,14 +47,12 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
+void ConvertBf16ToFp32Layer::ExecuteStrategy(IStrategy& strategy) const
 {
     // these conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    IgnoreUnused(visitor);
+    IgnoreUnused(strategy);
     throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index d2c0066..7131275 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ConvertBf16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index cc3c8b1..423721d 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -47,14 +47,12 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
+void ConvertFp16ToFp32Layer::ExecuteStrategy(IStrategy& strategy) const
 {
     // these conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    IgnoreUnused(visitor);
+    IgnoreUnused(strategy);
     throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index 59faf64..ab01a20 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ConvertFp16ToFp32Layer.
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index 978fbd1..1556dee 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -48,14 +48,12 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
+void ConvertFp32ToBf16Layer::ExecuteStrategy(IStrategy& strategy) const
 {
     // these conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    IgnoreUnused(visitor);
+    IgnoreUnused(strategy);
     throw armnn::Exception("ConvertFp32ToBf16Layer should never appear in an input graph");
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 8e33cb2..71de4fb 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ConvertFp32ToBf16Layer.
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 2e1074a..748cde3 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,14 +47,12 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
+void ConvertFp32ToFp16Layer::ExecuteStrategy(IStrategy& strategy) const
 {
     // These conversion layers are only inserted by the
     // optimizer and so will never be in an input graph.
-    IgnoreUnused(visitor);
+    IgnoreUnused(strategy);
     throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index e331c7d..8ae0f7c 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -27,9 +27,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ConvertFp32ToFp16Layer.
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index dbbd009..d023397 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -150,16 +150,9 @@
     return {m_Weight, m_Bias};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
-{
-    visitor.VisitConvolution2dLayer(this, GetParameters(), GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    strategy.ExecuteStrategy(this, GetParameters(), { }, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
 
 } // namespace armnn
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index 6bb86da..5799970 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -44,9 +44,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
index 42b275e..b01870a 100644
--- a/src/armnn/layers/Convolution3dLayer.cpp
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -124,14 +124,6 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution3dLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Convolution3dLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("Convolution3dLayer: VisitConvolution3dLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void Convolution3dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
diff --git a/src/armnn/layers/Convolution3dLayer.hpp b/src/armnn/layers/Convolution3dLayer.hpp
index 7cbd642..85628e5 100644
--- a/src/armnn/layers/Convolution3dLayer.hpp
+++ b/src/armnn/layers/Convolution3dLayer.hpp
@@ -37,10 +37,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
     void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 90a55cb..57cf3b7 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -52,13 +52,11 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DebugLayer::Accept(ILayerVisitor& visitor) const
+void DebugLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     // by design debug layers are never in input graphs
-    IgnoreUnused(visitor);
+    IgnoreUnused(strategy);
     throw armnn::Exception("DebugLayer should never appear in an input graph");
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index 054f5e4..fe7ad5c 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a DebugLayer.
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 033154e..2414b00 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -75,11 +75,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
+void DepthToSpaceLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitDepthToSpaceLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index d9f6752..c7e08e9 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -35,9 +35,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a DepthToSpaceLayer.
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 4fd2804..dcd800e 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -153,13 +153,6 @@
     return {m_Weight, m_Bias};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
-{
-    visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index e8ae9a6..9b0e6ad 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -43,10 +43,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
     void SerializeLayerParameters(ParameterStringifyFunction& fn) const override;
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index afa0a73..7bc03f4 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -46,11 +46,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DequantizeLayer::Accept(ILayerVisitor& visitor) const
+void DequantizeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitDequantizeLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index 99bde85..b0d04c5 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a DequantizeLayer.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 58f261c..28c6d50 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -79,16 +79,6 @@
     return { m_Anchors };
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
-{
-    ManagedConstTensorHandle managedAnchors(m_Anchors);
-    ConstTensor anchorTensor(managedAnchors.GetTensorInfo(), managedAnchors.Map());
-    visitor.VisitDetectionPostProcessLayer(this, GetParameters(), anchorTensor, GetName());
-    m_Anchors->Unmap();
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void DetectionPostProcessLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     ManagedConstTensorHandle managedAnchors(m_Anchors);
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index 1826645..07eb270 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -34,10 +34,6 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index c6faf41..e4e2a7d 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -32,11 +32,9 @@
     return CloneBase<DivisionLayer>(graph, GetName());
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void DivisionLayer::Accept(ILayerVisitor& visitor) const
+void DivisionLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitDivisionLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 91bccfc..398a947 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -24,9 +24,7 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     DivisionLayer* Clone(Graph& graph) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a DivisionLayer.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index c50910b..a6c1f16 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -55,11 +55,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
+void ElementwiseUnaryLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitElementwiseUnaryLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index 1261882..286030a 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -34,9 +34,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a ElementwiseUnaryLayer
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index f375f9a..5c6ac18 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -46,14 +46,6 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void FakeQuantizationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     IgnoreUnused(strategy);
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index c115c63..464fe69 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -28,9 +28,6 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 5004fab..0822c3c 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -51,11 +51,9 @@
         inferredShapes[0][0]);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FillLayer::Accept(ILayerVisitor& visitor) const
+void FillLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitFillLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index 096d9ba..034da75 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -27,9 +27,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a FillLayer.
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 616c118..471b575 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -45,11 +45,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FloorLayer::Accept(ILayerVisitor& visitor) const
+void FloorLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitFloorLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 2b16cfa..4906c85 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a FloorLayer.
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 1f006c9..c20bc8d 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -88,13 +88,6 @@
     return {m_Weight, m_Bias};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
-{
-    visitor.VisitFullyConnectedLayer(this, GetParameters(), GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void FullyConnectedLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index e97282d..09dbb5f 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -43,10 +43,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index 33d2088..252dfd2 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -89,11 +89,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void GatherLayer::Accept(ILayerVisitor& visitor) const
+void GatherLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitGatherLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index 8c29407..90cbedc 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -34,9 +34,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a GatherLayer.
diff --git a/src/armnn/layers/GatherNdLayer.cpp b/src/armnn/layers/GatherNdLayer.cpp
index 1ca2cbb..036b3cf 100644
--- a/src/armnn/layers/GatherNdLayer.cpp
+++ b/src/armnn/layers/GatherNdLayer.cpp
@@ -93,12 +93,4 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherNdLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void GatherNdLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("GatherNdLayer VisitGatherNdLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 } // namespace armnn
diff --git a/src/armnn/layers/GatherNdLayer.hpp b/src/armnn/layers/GatherNdLayer.hpp
index 9e07715..d873bd3 100644
--- a/src/armnn/layers/GatherNdLayer.hpp
+++ b/src/armnn/layers/GatherNdLayer.hpp
@@ -32,10 +32,6 @@
     /// will lead to a valid configuration of @ref GatherNdLayer.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
 protected:
     /// Constructor to create a GatherNdLayer.
     /// @param [in] name Optional name for the layer.
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index c05278f..01351f6 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -35,11 +35,9 @@
                                                "InputLayer should already have the TensorInfo set.");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void InputLayer::Accept(ILayerVisitor& visitor) const
+void InputLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitInputLayer(this, this->GetBindingId(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName(), GetBindingId());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index 2b73dce..fbc6a09 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create an InputLayer.
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 44e9870..10543c6 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -46,11 +46,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
+void InstanceNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitInstanceNormalizationLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index addd61e..16f99ad 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a InstanceNormalizationLayer.
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 0e0ae2e..8ea242b 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -46,11 +46,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
+void L2NormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitL2NormalizationLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 21072b2..4371143 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a L2NormalizationLayer.
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 4f51a2b..0698b70 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -45,11 +45,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
+void LogSoftmaxLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitLogSoftmaxLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index 9963f85..81ee760 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -29,9 +29,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a LogSoftmaxLayer.
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index 1a20c98..736a0c2 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -72,11 +72,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void LogicalBinaryLayer::Accept(ILayerVisitor& visitor) const
+void LogicalBinaryLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitLogicalBinaryLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/LogicalBinaryLayer.hpp b/src/armnn/layers/LogicalBinaryLayer.hpp
index caeaa0a..500f644 100644
--- a/src/armnn/layers/LogicalBinaryLayer.hpp
+++ b/src/armnn/layers/LogicalBinaryLayer.hpp
@@ -35,9 +35,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a LogicalBinaryLayer
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 06e5e8e..8e6bfdb 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -301,218 +301,6 @@
             m_LayerNormParameters.m_OutputLayerNormWeights};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void LstmLayer::Accept(ILayerVisitor& visitor) const
-{
-    LstmInputParams inputParams;
-    ManagedConstTensorHandle managedInputToForgetWeights(m_BasicParameters.m_InputToForgetWeights);
-    ManagedConstTensorHandle managedInputToCellWeights(m_BasicParameters.m_InputToCellWeights);
-    ManagedConstTensorHandle managedInputToOutputWeights(m_BasicParameters.m_InputToOutputWeights);
-    ManagedConstTensorHandle managedRecurrentToForgetWeights(m_BasicParameters.m_RecurrentToForgetWeights);
-    ManagedConstTensorHandle managedRecurrentToCellWeights(m_BasicParameters.m_RecurrentToCellWeights);
-    ManagedConstTensorHandle managedRecurrentToOutputWeights(m_BasicParameters.m_RecurrentToOutputWeights);
-    ManagedConstTensorHandle managedForgetGateBias(m_BasicParameters.m_ForgetGateBias);
-    ManagedConstTensorHandle managedCellBias(m_BasicParameters.m_CellBias);
-    ManagedConstTensorHandle managedOutputGateBias(m_BasicParameters.m_OutputGateBias);
-
-    // Cifg parameters
-    ManagedConstTensorHandle managedInputToInputWeights(m_CifgParameters.m_InputToInputWeights);
-    ManagedConstTensorHandle managedRecurrentToInputWeights(m_CifgParameters.m_RecurrentToInputWeights);
-    ManagedConstTensorHandle managedInputGateBias(m_CifgParameters.m_InputGateBias);
-
-    // Projection parameters
-    ManagedConstTensorHandle managedProjectionWeights(m_ProjectionParameters.m_ProjectionWeights);
-    ManagedConstTensorHandle managedProjectionBias(m_ProjectionParameters.m_ProjectionBias);
-
-    // Peephole parameters
-    ManagedConstTensorHandle managedCellToInputWeights(m_PeepholeParameters.m_CellToInputWeights);
-    ManagedConstTensorHandle managedCellToForgetWeights(m_PeepholeParameters.m_CellToForgetWeights);
-    ManagedConstTensorHandle managedCellToOutputWeights(m_PeepholeParameters.m_CellToOutputWeights);
-
-    // Layer normalisation parameters
-    ManagedConstTensorHandle managedInputLayerNormWeights(m_LayerNormParameters.m_InputLayerNormWeights);
-    ManagedConstTensorHandle managedForgetLayerNormWeights(m_LayerNormParameters.m_ForgetLayerNormWeights);
-    ManagedConstTensorHandle managedCellLayerNormWeights(m_LayerNormParameters.m_CellLayerNormWeights);
-    ManagedConstTensorHandle managedOutputLayerNormWeights(m_LayerNormParameters.m_OutputLayerNormWeights);
-
-    ConstTensor inputToInputWeightsTensor;
-    if (m_CifgParameters.m_InputToInputWeights != nullptr)
-    {
-        ConstTensor inputToInputWeightsTensorCopy(managedInputToInputWeights.GetTensorInfo(),
-                                                  managedInputToInputWeights.Map());
-        inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
-        inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
-    }
-    ConstTensor inputToForgetWeightsTensor;
-    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
-    {
-        ConstTensor inputToForgetWeightsTensorCopy(managedInputToForgetWeights.GetTensorInfo(),
-                                                   managedInputToForgetWeights.Map());
-        inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
-        inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
-    }
-    ConstTensor inputToCellWeightsTensor;
-    if (m_BasicParameters.m_InputToCellWeights != nullptr)
-    {
-        ConstTensor inputToCellWeightsTensorCopy(managedInputToCellWeights.GetTensorInfo(),
-                                                 managedInputToCellWeights.Map());
-        inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
-        inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
-    }
-    ConstTensor inputToOutputWeightsTensor;
-    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
-    {
-        ConstTensor inputToOutputWeightsTensorCopy(managedInputToOutputWeights.GetTensorInfo(),
-                                                   managedInputToOutputWeights.Map());
-        inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
-        inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
-    }
-    ConstTensor recurrentToInputWeightsTensor;
-    if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
-    {
-        ConstTensor recurrentToInputWeightsTensorCopy(
-                managedRecurrentToInputWeights.GetTensorInfo(),
-                managedRecurrentToInputWeights.Map());
-        recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
-        inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
-    }
-    ConstTensor recurrentToForgetWeightsTensor;
-    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
-    {
-        ConstTensor recurrentToForgetWeightsTensorCopy(
-                managedRecurrentToForgetWeights.GetTensorInfo(),
-                managedRecurrentToForgetWeights.Map());
-        recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
-        inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
-    }
-    ConstTensor recurrentToCellWeightsTensor;
-    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
-    {
-        ConstTensor recurrentToCellWeightsTensorCopy(
-                managedRecurrentToCellWeights.GetTensorInfo(),
-                managedRecurrentToCellWeights.Map());
-        recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
-        inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
-    }
-    ConstTensor recurrentToOutputWeightsTensor;
-    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
-    {
-        ConstTensor recurrentToOutputWeightsTensorCopy(
-                managedRecurrentToOutputWeights.GetTensorInfo(),
-                managedRecurrentToOutputWeights.Map());
-        recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
-        inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
-    }
-    ConstTensor cellToInputWeightsTensor;
-    if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
-    {
-        ConstTensor cellToInputWeightsTensorCopy(managedCellToInputWeights.GetTensorInfo(),
-                                                 managedCellToInputWeights.Map());
-        cellToInputWeightsTensor = cellToInputWeightsTensorCopy;
-        inputParams.m_CellToInputWeights = &cellToInputWeightsTensor;
-    }
-    ConstTensor cellToForgetWeightsTensor;
-    if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
-    {
-        ConstTensor cellToForgetWeightsTensorCopy(managedCellToForgetWeights.GetTensorInfo(),
-                                                  managedCellToForgetWeights.Map());
-        cellToForgetWeightsTensor = cellToForgetWeightsTensorCopy;
-        inputParams.m_CellToForgetWeights = &cellToForgetWeightsTensor;
-    }
-    ConstTensor cellToOutputWeightsTensor;
-    if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
-    {
-        ConstTensor cellToOutputWeightsTensorCopy(managedCellToOutputWeights.GetTensorInfo(),
-                                                  managedCellToOutputWeights.Map());
-        cellToOutputWeightsTensor = cellToOutputWeightsTensorCopy;
-        inputParams.m_CellToOutputWeights = &cellToOutputWeightsTensor;
-    }
-    ConstTensor inputGateBiasTensor;
-    if (m_CifgParameters.m_InputGateBias != nullptr)
-    {
-        ConstTensor inputGateBiasTensorCopy(managedInputGateBias.GetTensorInfo(),
-                                        managedInputGateBias.Map());
-        inputGateBiasTensor = inputGateBiasTensorCopy;
-        inputParams.m_InputGateBias = &inputGateBiasTensor;
-    }
-    ConstTensor forgetGateBiasTensor;
-    if (m_BasicParameters.m_ForgetGateBias != nullptr)
-    {
-        ConstTensor forgetGateBiasTensorCopy(managedForgetGateBias.GetTensorInfo(),
-                                             managedForgetGateBias.Map());
-        forgetGateBiasTensor = forgetGateBiasTensorCopy;
-        inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
-    }
-    ConstTensor cellBiasTensor;
-    if (m_BasicParameters.m_CellBias != nullptr)
-    {
-        ConstTensor cellBiasTensorCopy(managedCellBias.GetTensorInfo(),
-                                       managedCellBias.Map());
-        cellBiasTensor = cellBiasTensorCopy;
-        inputParams.m_CellBias = &cellBiasTensor;
-    }
-    ConstTensor outputGateBias;
-    if (m_BasicParameters.m_OutputGateBias != nullptr)
-    {
-        ConstTensor outputGateBiasCopy(managedOutputGateBias.GetTensorInfo(),
-                                       managedOutputGateBias.Map());
-        outputGateBias = outputGateBiasCopy;
-        inputParams.m_OutputGateBias = &outputGateBias;
-    }
-    ConstTensor projectionWeightsTensor;
-    if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
-    {
-        ConstTensor projectionWeightsTensorCopy(managedProjectionWeights.GetTensorInfo(),
-                                                managedProjectionWeights.Map());
-        projectionWeightsTensor = projectionWeightsTensorCopy;
-        inputParams.m_ProjectionWeights = &projectionWeightsTensor;
-    }
-    ConstTensor projectionBiasTensor;
-    if (m_ProjectionParameters.m_ProjectionBias != nullptr)
-    {
-        ConstTensor projectionBiasTensorCopy(managedProjectionBias.GetTensorInfo(),
-                                             managedProjectionBias.Map());
-        projectionBiasTensor = projectionBiasTensorCopy;
-        inputParams.m_ProjectionBias = &projectionBiasTensor;
-    }
-    ConstTensor inputLayerNormTensor;
-    if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
-    {
-        ConstTensor inputLayerNormTensorCopy(managedInputLayerNormWeights.GetTensorInfo(),
-                                             managedInputLayerNormWeights.Map());
-        inputLayerNormTensor = inputLayerNormTensorCopy;
-        inputParams.m_InputLayerNormWeights = &inputLayerNormTensor;
-    }
-    ConstTensor forgetLayerNormTensor;
-    if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
-    {
-        ConstTensor forgetLayerNormTensorCopy(managedForgetLayerNormWeights.GetTensorInfo(),
-                                             managedForgetLayerNormWeights.Map());
-        forgetLayerNormTensor = forgetLayerNormTensorCopy;
-        inputParams.m_ForgetLayerNormWeights = &forgetLayerNormTensor;
-    }
-    ConstTensor cellLayerNormTensor;
-    if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
-    {
-        ConstTensor cellLayerNormTensorCopy(managedCellLayerNormWeights.GetTensorInfo(),
-                                              managedCellLayerNormWeights.Map());
-        cellLayerNormTensor = cellLayerNormTensorCopy;
-        inputParams.m_CellLayerNormWeights = &cellLayerNormTensor;
-    }
-    ConstTensor outputLayerNormTensor;
-    if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
-    {
-        ConstTensor outputLayerNormTensorCopy(managedOutputLayerNormWeights.GetTensorInfo(),
-                                            managedOutputLayerNormWeights.Map());
-        outputLayerNormTensor = outputLayerNormTensorCopy;
-        inputParams.m_OutputLayerNormWeights = &outputLayerNormTensor;
-    }
-
-
-    visitor.VisitLstmLayer(this, GetParameters(), inputParams, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void LstmLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     std::vector<ConstTensor> constTensors;
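The Accept override removed above, like every other one in this patch, is superseded by the layer's ExecuteStrategy method. For downstream code that still implemented the deprecated ILayerVisitor, a minimal IStrategy replacement could look like the sketch below. This is illustrative only: it assumes the IStrategy::ExecuteStrategy parameter list implied by the calls in this patch (layer, descriptor, constants, name, optional binding id) and graph traversal via INetwork::ExecuteStrategy; all class and variable names are made up for the example.

// Minimal sketch only: migrating a deprecated ILayerVisitor to IStrategy.
// Assumed signature: ExecuteStrategy(layer, descriptor, constants, name, id),
// matching the strategy.ExecuteStrategy(...) calls introduced in this patch.
#include <armnn/INetwork.hpp>
#include <armnn/IStrategy.hpp>

#include <iostream>
#include <vector>

class LayerPrinter : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* /*layer*/,
                         const armnn::BaseDescriptor& /*descriptor*/,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id) override
    {
        // Called once per layer during traversal; replaces the per-layer
        // Visit<X>Layer callbacks of the removed ILayerVisitor.
        std::cout << "layer '" << (name ? name : "") << "' with "
                  << constants.size() << " constant tensor(s), binding id "
                  << id << "\n";
    }
};

// Usage (assuming INetwork::ExecuteStrategy): where client code previously
// called network->Accept(visitor), it now does:
//     LayerPrinter printer;
//     network->ExecuteStrategy(printer);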
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index fbcc03d..7310d41 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -44,10 +44,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index 7a33890..6141974 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -41,12 +41,10 @@
     ARMNN_ASSERT(GetNumOutputSlots() == 0);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MapLayer::Accept(ILayerVisitor& visitor) const
+void MapLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    IgnoreUnused(visitor);
+    IgnoreUnused(strategy);
     throw armnn::Exception("MapLayer should not appear in an input graph");
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MapLayer.hpp b/src/armnn/layers/MapLayer.hpp
index d82c44a..f450c88 100644
--- a/src/armnn/layers/MapLayer.hpp
+++ b/src/armnn/layers/MapLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a MapLayer.
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 438c9be..f074cf9 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -31,11 +31,9 @@
     return CloneBase<MaximumLayer>(graph, GetName());
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MaximumLayer::Accept(ILayerVisitor& visitor) const
+void MaximumLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitMaximumLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index f032b88..2b113a4 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -24,9 +24,7 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     MaximumLayer* Clone(Graph& graph) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a MaximumLayer.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index f695cc3..49eac04 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -103,11 +103,9 @@
     ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MeanLayer::Accept(ILayerVisitor& visitor) const
+void MeanLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitMeanLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 94b0cbe..87998bf 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -29,9 +29,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a MeanLayer.
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 33b922c..3695117 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -49,14 +49,6 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MemCopyLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("MemCopyLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void MemCopyLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     IgnoreUnused(strategy);
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 3c6fd0d..4d858b1 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -28,10 +28,6 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 0a1082f..182082b 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -49,14 +49,6 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MemImportLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("MemImportLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void MemImportLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     IgnoreUnused(strategy);
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 7787701..be6c463 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -28,10 +28,6 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index c979df8..94a0c10 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -58,11 +58,9 @@
     return {inputShapes[0]};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MergeLayer::Accept(ILayerVisitor& visitor) const
+void MergeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitMergeLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index d7cfcf3..79bc6f5 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -33,9 +33,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 8947041..f3661f9 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -32,11 +32,9 @@
     return CloneBase<MinimumLayer>(graph, GetName());
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MinimumLayer::Accept(ILayerVisitor& visitor) const
+void MinimumLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitMinimumLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 634591e..17ef55e 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -24,9 +24,7 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     MinimumLayer* Clone(Graph& graph) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 36f2689..bcc77dc 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -32,11 +32,9 @@
     return CloneBase<MultiplicationLayer>(graph, GetName());
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void MultiplicationLayer::Accept(ILayerVisitor& visitor) const
+void MultiplicationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitMultiplicationLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 8acf4f6..2dea822 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -24,9 +24,7 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     MultiplicationLayer* Clone(Graph& graph) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index e42a7cf..372cd76 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -46,11 +46,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void NormalizationLayer::Accept(ILayerVisitor& visitor) const
+void NormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitNormalizationLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index e36e886..a66acd9 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 241aaeb..43dd280 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -37,11 +37,9 @@
                                                "OutputLayer: Input slot must be connected.");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void OutputLayer::Accept(ILayerVisitor& visitor) const
+void OutputLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitOutputLayer(this, GetBindingId(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName(), GetBindingId());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
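As the OutputLayer hunk above shows, input and output layers forward their binding id through the fifth ExecuteStrategy argument (GetBindingId()), which is where a strategy now reads the id that VisitOutputLayer used to deliver. The sketch below is illustrative only and assumes the same ExecuteStrategy parameter list as the earlier sketch, plus IConnectableLayer::GetType() and the LayerType::Output enumerator.

// Sketch only: collecting output binding ids with IStrategy, relying on the
// id argument passed by OutputLayer::ExecuteStrategy in the hunk above.
#include <armnn/INetwork.hpp>
#include <armnn/IStrategy.hpp>

#include <vector>

class OutputBindingCollector : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& /*descriptor*/,
                         const std::vector<armnn::ConstTensor>& /*constants*/,
                         const char* /*name*/,
                         const armnn::LayerBindingId id) override
    {
        // Only output layers carry a meaningful binding id here.
        if (layer->GetType() == armnn::LayerType::Output)
        {
            m_OutputIds.push_back(id);
        }
    }

    std::vector<armnn::LayerBindingId> m_OutputIds;
};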
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index d2bdf19..b77714e 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -40,9 +40,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 7900fa5..ce63d7b 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -73,11 +73,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PadLayer::Accept(ILayerVisitor& visitor) const
+void PadLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitPadLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index 9a31ae5..a688f89 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -35,10 +35,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape> &inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a PadLayer.
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index e20eea6..16dc4d6 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -57,11 +57,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PermuteLayer::Accept(ILayerVisitor& visitor) const
+void PermuteLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitPermuteLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index db256b3..37ae444 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -60,9 +60,7 @@
                GetPermutation().IsEqual(PolymorphicDowncast<const PermuteLayer*>(&other)->GetPermutation());
     }
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index 9fb055b..34deed2 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -117,11 +117,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
+void Pooling2dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitPooling2dLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 677c10b..67f796e 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -34,9 +34,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/Pooling3dLayer.cpp b/src/armnn/layers/Pooling3dLayer.cpp
index 046e146..fe92f62 100644
--- a/src/armnn/layers/Pooling3dLayer.cpp
+++ b/src/armnn/layers/Pooling3dLayer.cpp
@@ -121,11 +121,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling3dLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void Pooling3dLayer::Accept(ILayerVisitor& visitor) const
+void Pooling3dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitPooling3dLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/Pooling3dLayer.hpp b/src/armnn/layers/Pooling3dLayer.hpp
index 0aa4853..946d473 100644
--- a/src/armnn/layers/Pooling3dLayer.hpp
+++ b/src/armnn/layers/Pooling3dLayer.hpp
@@ -34,9 +34,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index ff2fa32..94c9afa 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -49,14 +49,6 @@
     m_PreCompiledObject = std::move(preCompiledObject);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void PreCompiledLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     IgnoreUnused(strategy);
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index 65cf79b..7b478cf 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -33,11 +33,6 @@
 
     void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 private:
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 431e2f4..e6ab0d8 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -116,11 +116,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void PreluLayer::Accept(ILayerVisitor& visitor) const
+void PreluLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitPreluLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index eecffbc..e718043 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -35,9 +35,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index eb33227..5d44c8f 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -303,238 +303,6 @@
             m_LayerNormParameters.m_OutputLayerNormWeights};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void QLstmLayer::Accept(ILayerVisitor& visitor) const
-{
-    LstmInputParams inputParams;
-    ManagedConstTensorHandle managedInputToForgetWeights(m_BasicParameters.m_InputToForgetWeights);
-    ManagedConstTensorHandle managedInputToCellWeights(m_BasicParameters.m_InputToCellWeights);
-    ManagedConstTensorHandle managedInputToOutputWeights(m_BasicParameters.m_InputToOutputWeights);
-    ManagedConstTensorHandle managedRecurrentToForgetWeights(m_BasicParameters.m_RecurrentToForgetWeights);
-    ManagedConstTensorHandle managedRecurrentToCellWeights(m_BasicParameters.m_RecurrentToCellWeights);
-    ManagedConstTensorHandle managedRecurrentToOutputWeights(m_BasicParameters.m_RecurrentToOutputWeights);
-    ManagedConstTensorHandle managedForgetGateBias(m_BasicParameters.m_ForgetGateBias);
-    ManagedConstTensorHandle managedCellBias(m_BasicParameters.m_CellBias);
-    ManagedConstTensorHandle managedOutputGateBias(m_BasicParameters.m_OutputGateBias);
-
-    // Cifg parameters
-    ManagedConstTensorHandle managedInputToInputWeights(m_CifgParameters.m_InputToInputWeights);
-    ManagedConstTensorHandle managedRecurrentToInputWeights(m_CifgParameters.m_RecurrentToInputWeights);
-    ManagedConstTensorHandle managedInputGateBias(m_CifgParameters.m_InputGateBias);
-
-    // Projection parameters
-    ManagedConstTensorHandle managedProjectionWeights(m_ProjectionParameters.m_ProjectionWeights);
-    ManagedConstTensorHandle managedProjectionBias(m_ProjectionParameters.m_ProjectionBias);
-
-    // Peephole parameters
-    ManagedConstTensorHandle managedCellToInputWeights(m_PeepholeParameters.m_CellToInputWeights);
-    ManagedConstTensorHandle managedCellToForgetWeights(m_PeepholeParameters.m_CellToForgetWeights);
-    ManagedConstTensorHandle managedCellToOutputWeights(m_PeepholeParameters.m_CellToOutputWeights);
-
-    // Layer normalisation parameters
-    ManagedConstTensorHandle managedInputLayerNormWeights(m_LayerNormParameters.m_InputLayerNormWeights);
-    ManagedConstTensorHandle managedForgetLayerNormWeights(m_LayerNormParameters.m_ForgetLayerNormWeights);
-    ManagedConstTensorHandle managedCellLayerNormWeights(m_LayerNormParameters.m_CellLayerNormWeights);
-    ManagedConstTensorHandle managedOutputLayerNormWeights(m_LayerNormParameters.m_OutputLayerNormWeights);
-
-    ConstTensor inputToInputWeightsTensor;
-    if (m_CifgParameters.m_InputToInputWeights != nullptr)
-    {
-        ConstTensor inputToInputWeightsTensorCopy(managedInputToInputWeights.GetTensorInfo(),
-                                                  managedInputToInputWeights.Map());
-        inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
-        inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
-    }
-
-    ConstTensor inputToForgetWeightsTensor;
-    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
-    {
-        ConstTensor inputToForgetWeightsTensorCopy(managedInputToForgetWeights.GetTensorInfo(),
-                                                   managedInputToForgetWeights.Map());
-        inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
-        inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
-    }
-
-    ConstTensor inputToCellWeightsTensor;
-    if (m_BasicParameters.m_InputToCellWeights != nullptr)
-    {
-        ConstTensor inputToCellWeightsTensorCopy(managedInputToCellWeights.GetTensorInfo(),
-                                                 managedInputToCellWeights.Map());
-        inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
-        inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
-    }
-
-    ConstTensor inputToOutputWeightsTensor;
-    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
-    {
-        ConstTensor inputToOutputWeightsTensorCopy(managedInputToOutputWeights.GetTensorInfo(),
-                                                   managedInputToOutputWeights.Map());
-        inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
-        inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
-    }
-
-    ConstTensor recurrentToInputWeightsTensor;
-    if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
-    {
-        ConstTensor recurrentToInputWeightsTensorCopy(
-                managedRecurrentToInputWeights.GetTensorInfo(),
-                managedRecurrentToInputWeights.Map());
-        recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
-        inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
-    }
-
-    ConstTensor recurrentToForgetWeightsTensor;
-    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
-    {
-        ConstTensor recurrentToForgetWeightsTensorCopy(
-                managedRecurrentToForgetWeights.GetTensorInfo(),
-                managedRecurrentToForgetWeights.Map());
-        recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
-        inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
-    }
-
-    ConstTensor recurrentToCellWeightsTensor;
-    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
-    {
-        ConstTensor recurrentToCellWeightsTensorCopy(
-                managedRecurrentToCellWeights.GetTensorInfo(),
-                managedRecurrentToCellWeights.Map());
-        recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
-        inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
-    }
-
-    ConstTensor recurrentToOutputWeightsTensor;
-    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
-    {
-        ConstTensor recurrentToOutputWeightsTensorCopy(
-                managedRecurrentToOutputWeights.GetTensorInfo(),
-                managedRecurrentToOutputWeights.Map());
-        recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
-        inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
-    }
-
-    ConstTensor cellToInputWeightsTensor;
-    if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
-    {
-        ConstTensor cellToInputWeightsTensorCopy(managedCellToInputWeights.GetTensorInfo(),
-                                                 managedCellToInputWeights.Map());
-        cellToInputWeightsTensor = cellToInputWeightsTensorCopy;
-        inputParams.m_CellToInputWeights = &cellToInputWeightsTensor;
-    }
-
-    ConstTensor cellToForgetWeightsTensor;
-    if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
-    {
-        ConstTensor cellToForgetWeightsTensorCopy(managedCellToForgetWeights.GetTensorInfo(),
-                                                  managedCellToForgetWeights.Map());
-        cellToForgetWeightsTensor = cellToForgetWeightsTensorCopy;
-        inputParams.m_CellToForgetWeights = &cellToForgetWeightsTensor;
-    }
-
-    ConstTensor cellToOutputWeightsTensor;
-    if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
-    {
-        ConstTensor cellToOutputWeightsTensorCopy(managedCellToOutputWeights.GetTensorInfo(),
-                                                  managedCellToOutputWeights.Map());
-        cellToOutputWeightsTensor = cellToOutputWeightsTensorCopy;
-        inputParams.m_CellToOutputWeights = &cellToOutputWeightsTensor;
-    }
-
-    ConstTensor inputGateBiasTensor;
-    if (m_CifgParameters.m_InputGateBias != nullptr)
-    {
-        ConstTensor inputGateBiasTensorCopy(managedInputGateBias.GetTensorInfo(),
-                                            managedInputGateBias.Map());
-        inputGateBiasTensor = inputGateBiasTensorCopy;
-        inputParams.m_InputGateBias = &inputGateBiasTensor;
-    }
-
-    ConstTensor forgetGateBiasTensor;
-    if (m_BasicParameters.m_ForgetGateBias != nullptr)
-    {
-        ConstTensor forgetGateBiasTensorCopy(managedForgetGateBias.GetTensorInfo(),
-                                             managedForgetGateBias.Map());
-        forgetGateBiasTensor = forgetGateBiasTensorCopy;
-        inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
-    }
-
-    ConstTensor cellBiasTensor;
-    if (m_BasicParameters.m_CellBias != nullptr)
-    {
-        ConstTensor cellBiasTensorCopy(managedCellBias.GetTensorInfo(),
-                                       managedCellBias.Map());
-        cellBiasTensor = cellBiasTensorCopy;
-        inputParams.m_CellBias = &cellBiasTensor;
-    }
-
-    ConstTensor outputGateBias;
-    if (m_BasicParameters.m_OutputGateBias != nullptr)
-    {
-        ConstTensor outputGateBiasCopy(managedOutputGateBias.GetTensorInfo(),
-                                       managedOutputGateBias.Map());
-        outputGateBias = outputGateBiasCopy;
-        inputParams.m_OutputGateBias = &outputGateBias;
-    }
-
-    ConstTensor projectionWeightsTensor;
-    if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
-    {
-        ConstTensor projectionWeightsTensorCopy(managedProjectionWeights.GetTensorInfo(),
-                                                managedProjectionWeights.Map());
-        projectionWeightsTensor = projectionWeightsTensorCopy;
-        inputParams.m_ProjectionWeights = &projectionWeightsTensor;
-    }
-
-    ConstTensor projectionBiasTensor;
-    if (m_ProjectionParameters.m_ProjectionBias != nullptr)
-    {
-        ConstTensor projectionBiasTensorCopy(managedProjectionBias.GetTensorInfo(),
-                                             managedProjectionBias.Map());
-        projectionBiasTensor = projectionBiasTensorCopy;
-        inputParams.m_ProjectionBias = &projectionBiasTensor;
-    }
-
-    ConstTensor inputLayerNormTensor;
-    if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
-    {
-        ConstTensor inputLayerNormTensorCopy(managedInputLayerNormWeights.GetTensorInfo(),
-                                             managedInputLayerNormWeights.Map());
-        inputLayerNormTensor = inputLayerNormTensorCopy;
-        inputParams.m_InputLayerNormWeights = &inputLayerNormTensor;
-    }
-
-    ConstTensor forgetLayerNormTensor;
-    if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
-    {
-        ConstTensor forgetLayerNormTensorCopy(managedForgetLayerNormWeights.GetTensorInfo(),
-                                              managedForgetLayerNormWeights.Map());
-        forgetLayerNormTensor = forgetLayerNormTensorCopy;
-        inputParams.m_ForgetLayerNormWeights = &forgetLayerNormTensor;
-    }
-
-    ConstTensor cellLayerNormTensor;
-    if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
-    {
-        ConstTensor cellLayerNormTensorCopy(managedCellLayerNormWeights.GetTensorInfo(),
-                                            managedCellLayerNormWeights.Map());
-        cellLayerNormTensor = cellLayerNormTensorCopy;
-        inputParams.m_CellLayerNormWeights = &cellLayerNormTensor;
-    }
-
-    ConstTensor outputLayerNormTensor;
-    if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
-    {
-        ConstTensor outputLayerNormTensorCopy(managedOutputLayerNormWeights.GetTensorInfo(),
-                                              managedOutputLayerNormWeights.Map());
-        outputLayerNormTensor = outputLayerNormTensorCopy;
-        inputParams.m_OutputLayerNormWeights = &outputLayerNormTensor;
-    }
-
-
-    visitor.VisitQLstmLayer(this, GetParameters(), inputParams, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 
 void QLstmLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 12774a9..115c47b 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -107,11 +107,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 55f23bf..3ad286e 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -7,8 +7,6 @@
 
 #include "LayerCloneBase.hpp"
 
-#include <armnn/ILayerVisitor.hpp>
-
 namespace armnn
 {
 
@@ -45,11 +43,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void QuantizeLayer::Accept(ILayerVisitor& visitor) const
+void QuantizeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitQuantizeLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index d8898ba..338d5d5 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -12,7 +12,6 @@
 //Forward
 class IWorkload;
 class IWorkloadFactory;
-class ILayerVisitor;
 
 class QuantizeLayer : public Layer
 {
@@ -23,9 +22,7 @@
 
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index e9b9d1c..9d58d25 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -170,145 +170,6 @@
     };
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
-{
-    QuantizedLstmInputParams inputParams;
-
-    ManagedConstTensorHandle managedInputToInputWeights(m_QuantizedLstmParameters.m_InputToInputWeights);
-    ManagedConstTensorHandle managedInputToForgetWeights(m_QuantizedLstmParameters.m_InputToForgetWeights);
-    ManagedConstTensorHandle managedInputToCellWeights(m_QuantizedLstmParameters.m_InputToCellWeights);
-    ManagedConstTensorHandle managedInputToOutputWeights(m_QuantizedLstmParameters.m_InputToOutputWeights);
-
-    ManagedConstTensorHandle managedRecurrentToInputWeights(m_QuantizedLstmParameters.m_RecurrentToInputWeights);
-    ManagedConstTensorHandle managedRecurrentToForgetWeights(m_QuantizedLstmParameters.m_RecurrentToForgetWeights);
-    ManagedConstTensorHandle managedRecurrentToCellWeights(m_QuantizedLstmParameters.m_RecurrentToCellWeights);
-    ManagedConstTensorHandle managedRecurrentToOutputWeights(m_QuantizedLstmParameters.m_RecurrentToOutputWeights);
-
-    ManagedConstTensorHandle managedInputGateBias(m_QuantizedLstmParameters.m_InputGateBias);
-    ManagedConstTensorHandle managedForgetGateBias(m_QuantizedLstmParameters.m_ForgetGateBias);
-    ManagedConstTensorHandle managedCellBias(m_QuantizedLstmParameters.m_CellBias);
-    ManagedConstTensorHandle managedOutputGateBias(m_QuantizedLstmParameters.m_OutputGateBias);
-
-    // InputToX weight tensors
-    ConstTensor inputToInputWeightsTensor;
-    if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
-    {
-        ConstTensor inputToInputWeightsTensorCopy(managedInputToInputWeights.GetTensorInfo(),
-                                                  managedInputToInputWeights.Map());
-        inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
-        inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
-    }
-
-    ConstTensor inputToForgetWeightsTensor;
-    if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr)
-    {
-        ConstTensor inputToForgetWeightsTensorCopy(managedInputToForgetWeights.GetTensorInfo(),
-                                                   managedInputToForgetWeights.Map());
-        inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
-        inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
-    }
-
-    ConstTensor inputToCellWeightsTensor;
-    if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr)
-    {
-        ConstTensor inputToCellWeightsTensorCopy(managedInputToCellWeights.GetTensorInfo(),
-                                                 managedInputToCellWeights.Map());
-        inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
-        inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
-    }
-
-    ConstTensor inputToOutputWeightsTensor;
-    if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr)
-    {
-        ConstTensor inputToOutputWeightsTensorCopy(managedInputToOutputWeights.GetTensorInfo(),
-                                                   managedInputToOutputWeights.Map());
-        inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
-        inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
-    }
-
-    // RecurrentToX weight tensors
-    ConstTensor recurrentToInputWeightsTensor;
-    if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr)
-    {
-        ConstTensor recurrentToInputWeightsTensorCopy(
-                managedRecurrentToInputWeights.GetTensorInfo(),
-                managedRecurrentToInputWeights.Map());
-        recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
-        inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
-    }
-
-    ConstTensor recurrentToForgetWeightsTensor;
-    if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr)
-    {
-        ConstTensor recurrentToForgetWeightsTensorCopy(
-                managedRecurrentToForgetWeights.GetTensorInfo(),
-                managedRecurrentToForgetWeights.Map());
-        recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
-        inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
-    }
-
-    ConstTensor recurrentToCellWeightsTensor;
-    if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr)
-    {
-        ConstTensor recurrentToCellWeightsTensorCopy(
-                managedRecurrentToCellWeights.GetTensorInfo(),
-                managedRecurrentToCellWeights.Map());
-        recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
-        inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
-    }
-
-    ConstTensor recurrentToOutputWeightsTensor;
-    if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr)
-    {
-        ConstTensor recurrentToOutputWeightsTensorCopy(
-                managedRecurrentToOutputWeights.GetTensorInfo(),
-                managedRecurrentToOutputWeights.Map());
-        recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
-        inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
-    }
-
-    // Bias tensors
-    ConstTensor inputGateBiasTensor;
-    if (m_QuantizedLstmParameters.m_InputGateBias != nullptr)
-    {
-        ConstTensor inputGateBiasTensorCopy(managedInputGateBias.GetTensorInfo(),
-                                            managedInputGateBias.Map());
-        inputGateBiasTensor = inputGateBiasTensorCopy;
-        inputParams.m_InputGateBias = &inputGateBiasTensor;
-    }
-
-    ConstTensor forgetGateBiasTensor;
-    if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr)
-    {
-        ConstTensor forgetGateBiasTensorCopy(managedForgetGateBias.GetTensorInfo(),
-                                             managedForgetGateBias.Map());
-        forgetGateBiasTensor = forgetGateBiasTensorCopy;
-        inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
-    }
-
-    ConstTensor cellBiasTensor;
-    if (m_QuantizedLstmParameters.m_CellBias != nullptr)
-    {
-        ConstTensor cellBiasTensorCopy(managedCellBias.GetTensorInfo(),
-                                       managedCellBias.Map());
-        cellBiasTensor = cellBiasTensorCopy;
-        inputParams.m_CellBias = &cellBiasTensor;
-    }
-
-    ConstTensor outputGateBiasTensor;
-    if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr)
-    {
-        ConstTensor outputGateBiasCopy(managedOutputGateBias.GetTensorInfo(),
-                                       managedOutputGateBias.Map());
-        outputGateBiasTensor = outputGateBiasCopy;
-        inputParams.m_OutputGateBias = &outputGateBiasTensor;
-    }
-
-    visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     std::vector<ConstTensor> constTensors;
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index fe7d423..8def0f3 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -69,11 +69,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 84d25bf..0f9327b 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -42,13 +42,6 @@
     ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void RankLayer::Accept(ILayerVisitor& visitor) const
-{
-    visitor.VisitRankLayer(this, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void RankLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index 416e1b0..52d14c4 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -22,11 +22,6 @@
 
         void ValidateTensorShapesFromInputs() override;
 
-        ARMNN_NO_DEPRECATE_WARN_BEGIN
-        void Accept(ILayerVisitor& visitor) const override;
-        ARMNN_NO_DEPRECATE_WARN_END
-
-
         void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index 1f4387b..aa54bc8 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -102,11 +102,9 @@
     ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ReduceLayer::Accept(ILayerVisitor& visitor) const
+void ReduceLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitReduceLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp
index a6ac44e..e9ea5d8 100644
--- a/src/armnn/layers/ReduceLayer.hpp
+++ b/src/armnn/layers/ReduceLayer.hpp
@@ -27,9 +27,7 @@
     /// will lead to a valid configuration of @ref ReduceLayer.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index b194f7a..c5ec45f 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -53,11 +53,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ReshapeLayer::Accept(ILayerVisitor& visitor) const
+void ReshapeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitReshapeLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index d107b5c..ed114f9 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -45,9 +45,7 @@
                m_Param.m_TargetShape == PolymorphicDowncast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape;
     }
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 89a94f7..188d134 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -75,11 +75,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ResizeLayer::Accept(ILayerVisitor& visitor) const
+void ResizeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitResizeLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index fab18c7..a33573c 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -34,9 +34,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 3a63b7c..0c2cc63 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -48,11 +48,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void RsqrtLayer::Accept(ILayerVisitor& visitor) const
+void RsqrtLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitRsqrtLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index a31aea6..c09be2e 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index ecc112c..dbc0d7a 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -59,13 +59,6 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void ShapeLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("ShapeLayer VisitShapeLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
 
 void ShapeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
diff --git a/src/armnn/layers/ShapeLayer.hpp b/src/armnn/layers/ShapeLayer.hpp
index 35ef873..071b0df 100644
--- a/src/armnn/layers/ShapeLayer.hpp
+++ b/src/armnn/layers/ShapeLayer.hpp
@@ -34,11 +34,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
-
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index 0d61181..6362be3 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -59,11 +59,9 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SliceLayer::Accept(ILayerVisitor& visitor) const
+void SliceLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitSliceLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index dda66a1..1162e6a 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -34,9 +34,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index a2930e6..b1cb191 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -46,11 +46,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
+void SoftmaxLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitSoftmaxLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index 035e7bc..c37ecda 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index a4c6d1b..151b6a5 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -83,11 +83,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
+void SpaceToBatchNdLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitSpaceToBatchNdLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 70972bd..e61ec6c 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -35,9 +35,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 51d79f4..f2f0b76 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -77,11 +77,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
+void SpaceToDepthLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitSpaceToDepthLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index 267ac3b..07764d8 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -35,9 +35,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 42cb6e1..0226a04 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -241,11 +241,9 @@
     }
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SplitterLayer::Accept(ILayerVisitor& visitor) const
+void SplitterLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitSplitterLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 1fc37ef..2f868e8 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -43,9 +43,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index b842f1b..3ebacaf 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -95,11 +95,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void StackLayer::Accept(ILayerVisitor& visitor) const
+void StackLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitStackLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 8d38907..973645d 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -34,9 +34,7 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index ccf1529..e0d057e 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -41,10 +41,9 @@
     // so do nothing here.
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void StandInLayer::Accept(ILayerVisitor& visitor) const
+void StandInLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitStandInLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
+
 } // namespace armnn
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index bb50006..9487ff8 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -35,11 +35,7 @@
     /// @return Does not return anything. Throws Exception if called.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    /// Accepts a visitor object and calls VisitStandInLayer() method.
-    /// @param visitor The visitor on which to call VisitStandInLayer() method.
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index 56051c2..a179531 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -111,11 +111,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
+void StridedSliceLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitStridedSliceLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index 7e17cb2..888ae7e 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -34,9 +34,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index 8e9b173..0e92013 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -32,11 +32,9 @@
     return CloneBase<SubtractionLayer>(graph, GetName());
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SubtractionLayer::Accept(ILayerVisitor& visitor) const
+void SubtractionLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitSubtractionLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 8c31479..86d5f9e 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -24,9 +24,7 @@
     /// @param [in] graph The graph into which this layer is being cloned.
     SubtractionLayer* Clone(Graph& graph) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index afa4d52..c2022fd 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -52,11 +52,9 @@
             GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void SwitchLayer::Accept(ILayerVisitor& visitor) const
+void SwitchLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitSwitchLayer(this, GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index a36261b..4af82f2 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -28,10 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
     /// Constructor to create a SwitchLayer.
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 1cbaf34..eec42fb 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -122,24 +122,6 @@
     return {m_Weight, m_Bias};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
-{
-    ManagedConstTensorHandle managedWeight(m_Weight);
-    ConstTensor weightsTensor(managedWeight.GetTensorInfo(), managedWeight.Map());
-
-    Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
-    ManagedConstTensorHandle managedBias(m_Bias);
-    if (GetParameters().m_BiasEnabled)
-    {
-        ConstTensor biasTensor(managedBias.GetTensorInfo(), managedBias.Map());
-        optionalBiasTensor = Optional<ConstTensor>(biasTensor);
-    }
-
-    visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     ManagedConstTensorHandle managedWeight(m_Weight);
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index b6db41c..1fa2902 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -40,10 +40,6 @@
     /// @return A vector of the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 3340b9d..bc9e0ac 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -57,11 +57,9 @@
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void TransposeLayer::Accept(ILayerVisitor& visitor) const
+void TransposeLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    visitor.VisitTransposeLayer(this, GetParameters(), GetName());
+    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index 8449db4..08268f2 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -58,9 +58,7 @@
                GetPermutation().IsEqual(PolymorphicDowncast<const TransposeLayer*>(&other)->GetPermutation());
     }
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index e5f89bd..857f369 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -308,14 +308,6 @@
             m_LayerNormParameters.m_OutputLayerNormWeights};
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void UnidirectionalSequenceLstmLayer::Accept(ILayerVisitor& visitor) const
-{
-    IgnoreUnused(visitor);
-    throw armnn::Exception("UnidirectionalSequenceLstmLayer: VisitUnidirectionalSequenceLstmLayer is not implemented");
-}
-ARMNN_NO_DEPRECATE_WARN_END
-
 void UnidirectionalSequenceLstmLayer::ExecuteStrategy(IStrategy& strategy) const
 {
     std::vector<ConstTensor> constTensors;
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
index 857d277..60b6893 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.hpp
@@ -44,10 +44,6 @@
     /// @return A vector to the inferred output shape.
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
-
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index 9705e3f..cfbde21 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -41,12 +41,10 @@
     ARMNN_ASSERT(GetNumOutputSlots() == 0);
 }
 
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-void UnmapLayer::Accept(ILayerVisitor& visitor) const
+void UnmapLayer::ExecuteStrategy(IStrategy& strategy) const
 {
-    IgnoreUnused(visitor);
+    IgnoreUnused(strategy);
     throw armnn::Exception("UnmapLayer should not appear in an input graph");
 }
-ARMNN_NO_DEPRECATE_WARN_END
 
 } // namespace armnn
diff --git a/src/armnn/layers/UnmapLayer.hpp b/src/armnn/layers/UnmapLayer.hpp
index 3d1d115..8c8aecd 100644
--- a/src/armnn/layers/UnmapLayer.hpp
+++ b/src/armnn/layers/UnmapLayer.hpp
@@ -28,9 +28,7 @@
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    void Accept(ILayerVisitor& visitor) const override;
-    ARMNN_NO_DEPRECATE_WARN_END
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 
 protected:
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 9d98104..058f079 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -5,7 +5,6 @@
 
 #include <GraphUtils.hpp>
 
-#include <armnn/LayerVisitorBase.hpp>
 
 #include <Network.hpp>
 
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 2ea3c2a..d5d506d 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -5,7 +5,6 @@
 
 #include <doctest/doctest.h>
 
-#include <armnn/LayerVisitorBase.hpp>
 
 #include <armnn/backends/IBackendContext.hpp>
 #include <armnn/backends/IBackendInternal.hpp>
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 1a0978f..216f4dc 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -4,9 +4,7 @@
 //
 #pragma once
 
-#include <armnn/ILayerVisitor.hpp>
 #include <armnn/IStrategy.hpp>
-#include <armnn/LayerVisitorBase.hpp>
 
 #include <armnnSerializer/ISerializer.hpp>