IVGCVSW-6629 Stabilize the ILayerSupport interface with unified strategy.

  * New Virtual Function Added.
  * Implemented in Ref Neon CL with switch statement for all layers.
  * Deprecate original IsXXXLayerSupported functions.
  * Ensure Npu not broken with change.

Change-Id: Icf61b16beec83d6af1cb287e24ab1e98a6138c8c
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 220590e..89a0772 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -4,13 +4,13 @@
 //
 
 #include <armnn/Deprecated.hpp>
-#include <armnn/Descriptors.hpp>
 #include <armnn/Exceptions.hpp>
 #include <armnn/Types.hpp>
 
 #include <backendsCommon/LayerSupportBase.hpp>
 
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 namespace
 {
@@ -37,6 +37,51 @@
 namespace armnn
 {
 
+bool LayerSupportBase::IsLayerSupported(const LayerType& type,
+                                        const std::vector<TensorInfo>& infos,
+                                        const BaseDescriptor& descriptor,
+                                        const Optional<LstmInputParamsInfo>&,
+                                        const Optional<QuantizedLstmInputParamsInfo>&,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    switch(type)
+    {
+        case LayerType::MemCopy:
+            return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::MemImport:
+            return IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::StandIn:
+        {
+            auto desc = *(PolymorphicDowncast<const StandInDescriptor*>(&descriptor));
+
+            if (infos.size() != (desc.m_NumInputs + desc.m_NumOutputs))
+            {
+                throw InvalidArgumentException("Number of StandIn layer TensorInfos does not equal "
+                                               "the combined number of input and output slots assigned "
+                                               "to the StandIn descriptor");
+            }
+
+            std::vector<const TensorInfo*> inputInfos;
+            for (uint32_t i = 0; i < desc.m_NumInputs; i++)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            std::vector<const TensorInfo*> outputInfos;
+            for (uint32_t i = desc.m_NumInputs; i < infos.size(); i++)
+            {
+                outputInfos.push_back(&infos[i]);
+            }
+
+            return IsStandInSupported(inputInfos,
+                                      outputInfos,
+                                      desc,
+                                      reasonIfUnsupported);
+        }
+        default:
+            return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+    }
+}
+
 bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input
                                              const TensorInfo&, //output
                                              const ActivationDescriptor&, // descriptor
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index ef947aa..3d9c968 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -13,21 +13,33 @@
 class LayerSupportBase : public ILayerSupport
 {
 public:
+
+    bool IsLayerSupported(const LayerType& type,
+                          const std::vector<TensorInfo>& infos,
+                          const BaseDescriptor& descriptor,
+                          const Optional<LstmInputParamsInfo>& lstmParamsInfo = EmptyOptional(),
+                          const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo = EmptyOptional(),
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsAdditionSupported(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsArgMinMaxSupported(const TensorInfo& input,
                               const TensorInfo& output,
                               const ArgMinMaxDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsBatchNormalizationSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const TensorInfo& mean,
@@ -37,31 +49,37 @@
                                        const BatchNormalizationDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsBatchToSpaceNdSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const BatchToSpaceNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsCastSupported(const TensorInfo& input,
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsChannelShuffleSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const ChannelShuffleDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsComparisonSupported(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
                                const ComparisonDescriptor& descriptor,
                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -69,6 +87,7 @@
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -77,11 +96,13 @@
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsConvertFp32ToFp16Supported(
             const TensorInfo& input,
             const TensorInfo& output,
             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsConvolution2dSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const Convolution2dDescriptor& descriptor,
@@ -89,6 +110,7 @@
                                   const Optional<TensorInfo>& biases,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsConvolution3dSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const Convolution3dDescriptor& descriptor,
@@ -96,15 +118,18 @@
                                   const Optional<TensorInfo>& biases,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsDebugSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsDepthToSpaceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const DepthToSpaceDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const DepthwiseConvolution2dDescriptor& descriptor,
@@ -112,6 +137,7 @@
                                          const Optional<TensorInfo>& biases,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsDequantizeSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -126,6 +152,7 @@
                                          const DetectionPostProcessDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const DepthwiseConvolution2dDescriptor& descriptor,
@@ -134,29 +161,35 @@
                                                 Optional<std::string&> reasonIfUnsupported =
                                                     EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsDivisionSupported(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsElementwiseUnarySupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const ElementwiseUnaryDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsFakeQuantizationSupported(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     virtual bool IsFillSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const FillDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsFloorSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsFullyConnectedSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const TensorInfo& weights,
@@ -164,42 +197,50 @@
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsGatherSupported(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            const GatherDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsInstanceNormalizationSupported(
         const TensorInfo& input,
         const TensorInfo& output,
         const InstanceNormalizationDescriptor& descriptor,
         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsL2NormalizationSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const L2NormalizationDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsLogicalBinarySupported(const TensorInfo& input0,
                                   const TensorInfo& input1,
                                   const TensorInfo& output,
                                   const LogicalBinaryDescriptor& descriptor,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsLogicalUnarySupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const ElementwiseUnaryDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsLogSoftmaxSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const LogSoftmaxDescriptor& descriptor,
                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsLstmSupported(const TensorInfo& input,
                          const TensorInfo& outputStateIn,
                          const TensorInfo& cellStateIn,
@@ -211,11 +252,13 @@
                          const LstmInputParamsInfo& paramsInfo,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsMaximumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsMeanSupported(const TensorInfo& input,
                          const TensorInfo& output,
                          const MeanDescriptor& descriptor,
@@ -234,57 +277,69 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsMinimumSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsMultiplicationSupported(const TensorInfo& input0,
                                    const TensorInfo& input1,
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsNormalizationSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const NormalizationDescriptor& descriptor,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsOutputSupported(const TensorInfo& output,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsPadSupported(const TensorInfo& input,
                         const TensorInfo& output,
                         const PadDescriptor& descriptor,
                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsPermuteSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const PermuteDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsPooling2dSupported(const TensorInfo& input,
                               const TensorInfo& output,
                               const Pooling2dDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsPooling3dSupported(const TensorInfo& input,
                               const TensorInfo& output,
                               const Pooling3dDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsPreCompiledSupported(const TensorInfo& input,
                                 const PreCompiledDescriptor& descriptor,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsPreluSupported(const TensorInfo& input,
                           const TensorInfo& alpha,
                           const TensorInfo& output,
                           Optional<std::string &> reasonIfUnsupported) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsQuantizeSupported(const TensorInfo& input,
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsQLstmSupported(const TensorInfo& input,
                           const TensorInfo& previousOutputIn,
                           const TensorInfo& previousCellStateIn,
@@ -303,20 +358,24 @@
                                   const QuantizedLstmInputParamsInfo& paramsInfo,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsRankSupported(const TensorInfo& input,
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsReduceSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const ReduceDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsReshapeSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsResizeSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const ResizeDescriptor& descriptor,
@@ -326,31 +385,37 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsSliceSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           const SliceDescriptor& descriptor,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsSpaceToBatchNdSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const SpaceToBatchNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsSpaceToDepthSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const SpaceToDepthDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsSplitterSupported(const TensorInfo& input,
                              const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                           const TensorInfo& output,
                           const StackDescriptor& descriptor,
@@ -361,22 +426,26 @@
                             const StandInDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsSubtractionSupported(const TensorInfo& input0,
                                 const TensorInfo& input1,
                                 const TensorInfo& output,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsSwitchSupported(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output0,
                            const TensorInfo& output1,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsTransposeConvolution2dSupported(
         const TensorInfo& input,
         const TensorInfo& output,
@@ -385,11 +454,13 @@
         const Optional<TensorInfo>& biases,
         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsTransposeSupported(const TensorInfo& input,
                               const TensorInfo& output,
                               const TransposeDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
     bool IsUnidirectionalSequenceLstmSupported(
         const TensorInfo& input,
         const TensorInfo& outputStateIn,
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 49e9079..0d06595 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1450,12 +1450,11 @@
     TensorInfo outputInfo(outputShape, DataType::Float32);
     TensorInfo weightInfo(weightShape, DataType::Float32);
     Convolution2dDescriptor convolution2dDescriptor;
+    std::vector<TensorInfo> infos = {inputInfo, outputInfo, weightInfo, TensorInfo()};
     bool referenceConvolution2dSupported =
-            referenceLayerSupport->IsConvolution2dSupported(inputInfo,
-                                                            outputInfo,
-                                                            convolution2dDescriptor,
-                                                            weightInfo,
-                                                            EmptyOptional());
+             referenceLayerSupport->IsLayerSupported(LayerType::Convolution2d,
+                                                     infos,
+                                                     convolution2dDescriptor);
     CHECK(referenceConvolution2dSupported);
 
     // Test the backend instance by creating a workload
@@ -1535,12 +1534,11 @@
     TensorInfo outputInfo(outputShape, DataType::Float32);
     TensorInfo weightInfo(weightShape, DataType::Float32);
     Convolution2dDescriptor convolution2dDescriptor;
-    bool sampleConvolution2dSupported =
-            sampleLayerSupport->IsConvolution2dSupported(inputInfo,
-                                                         outputInfo,
-                                                         convolution2dDescriptor,
-                                                         weightInfo,
-                                                         EmptyOptional());
+    std::vector<TensorInfo> infos = {inputInfo, outputInfo, weightInfo, TensorInfo()};
+    bool sampleConvolution2dSupported =
+             sampleLayerSupport->IsLayerSupported(LayerType::Convolution2d,
+                                                  infos,
+                                                  convolution2dDescriptor);
     CHECK(!sampleConvolution2dSupported);
 
     // Test the backend instance by creating a workload
diff --git a/src/backends/backendsCommon/test/MockBackend.hpp b/src/backends/backendsCommon/test/MockBackend.hpp
index 3a5e79a..df133df 100644
--- a/src/backends/backendsCommon/test/MockBackend.hpp
+++ b/src/backends/backendsCommon/test/MockBackend.hpp
@@ -172,6 +172,55 @@
 class MockLayerSupport : public LayerSupportBase
 {
 public:
+    bool IsLayerSupported(const LayerType& type,
+                          const std::vector<TensorInfo>& infos,
+                          const BaseDescriptor& descriptor,
+                          const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
+                          const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
+                          Optional<std::string&> reasonIfUnsupported) const override
+    {
+        switch(type)
+        {
+            case LayerType::Input:
+                return IsInputSupported(infos[0], reasonIfUnsupported);
+            case LayerType::Output:
+                return IsOutputSupported(infos[0], reasonIfUnsupported);
+            case LayerType::Addition:
+                return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+            case LayerType::Convolution2d:
+            {
+                if (infos.size() != 4)
+                {
+                    throw InvalidArgumentException("Invalid number of Convolution2d "
+                                                   "TensorInfos. TensorInfos should be of format: "
+                                                   "{input, output, weights, biases}.");
+                }
+
+                auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+                if (infos[3] == TensorInfo())
+                {
+                    return IsConvolution2dSupported(infos[0],
+                                                    infos[1],
+                                                    desc,
+                                                    infos[2],
+                                                    EmptyOptional(),
+                                                    reasonIfUnsupported);
+                }
+                else
+                {
+                    return IsConvolution2dSupported(infos[0],
+                                                    infos[1],
+                                                    desc,
+                                                    infos[2],
+                                                    infos[3],
+                                                    reasonIfUnsupported);
+                }
+            }
+            default:
+                return false;
+        }
+    }
+
     bool IsInputSupported(const TensorInfo& /*input*/,
                           Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
     {
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
index 75e637e..380ce4a 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
@@ -14,6 +14,30 @@
 class MockImportLayerSupport : public LayerSupportBase
 {
 public:
+    bool IsLayerSupported(const LayerType& type,
+                          const std::vector<TensorInfo>& infos,
+                          const BaseDescriptor& /*descriptor*/,
+                          const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
+                          const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
+                          Optional<std::string&> reasonIfUnsupported) const override
+    {
+        switch(type)
+        {
+            case LayerType::Addition:
+                return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+            case LayerType::Input:
+                return IsInputSupported(infos[0], reasonIfUnsupported);
+            case LayerType::Output:
+                return IsOutputSupported(infos[0], reasonIfUnsupported);
+            case LayerType::MemCopy:
+                return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+            case LayerType::MemImport:
+                return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+            default:
+                return false;
+        }
+    }
+
     bool IsAdditionSupported(const TensorInfo& input0,
                              const TensorInfo& input1,
                              const TensorInfo& output,
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index afcaf56..e5204e4 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -7,7 +7,6 @@
 #include "ClBackendId.hpp"
 #include "ClBackendModelContext.hpp"
 
-#include <armnn/Descriptors.hpp>
 #include <armnn/BackendRegistry.hpp>
 
 #include <InternalTypes.hpp>
@@ -177,6 +176,415 @@
 {
 }
 
+bool ClLayerSupport::IsLayerSupported(const LayerType& type,
+                                      const std::vector<TensorInfo>& infos,
+                                      const BaseDescriptor& descriptor,
+                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    switch (type)
+    {
+        case LayerType::Activation:
+            return IsActivationSupported(infos[0],
+                                         infos[1],
+                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Addition:
+            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ArgMinMax:
+            return IsArgMinMaxSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::BatchNormalization:
+            return IsBatchNormalizationSupported(infos[0],
+                                                 infos[1],
+                                                 infos[2],
+                                                 infos[3],
+                                                 infos[4],
+                                                 infos[5],
+                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+                                                     (&descriptor)),
+                                                 reasonIfUnsupported);
+        case LayerType::BatchToSpaceNd:
+            return IsBatchToSpaceNdSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Comparison:
+            return IsComparisonSupported(infos[0],
+                                         infos[1],
+                                         infos[2],
+                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Concat:
+        {
+            std::vector<const TensorInfo*> inputInfos;
+            for (uint32_t i = 0; i < (infos.size() - 1); i++)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            return IsConcatSupported(inputInfos,
+                                     infos[infos.size() - 1],
+                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        }
+        case LayerType::Constant:
+            return IsConstantSupported(infos[0], reasonIfUnsupported);
+        case LayerType::ConvertFp16ToFp32:
+            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ConvertFp32ToFp16:
+            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Convolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsConvolution2dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                EmptyOptional(),
+                                                reasonIfUnsupported);
+            }
+            else
+            {
+                return IsConvolution2dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                infos[3],
+                                                reasonIfUnsupported);
+            }
+        }
+        case LayerType::DepthToSpace:
+            return IsDepthToSpaceSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::DepthwiseConvolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsDepthwiseConvolutionSupported(infos[0],
+                                                       infos[1],
+                                                       desc,
+                                                       infos[2],
+                                                       EmptyOptional(),
+                                                       reasonIfUnsupported);
+            }
+            else
+            {
+                return IsDepthwiseConvolutionSupported(infos[0],
+                                                       infos[1],
+                                                       desc,
+                                                       infos[2],
+                                                       infos[3],
+                                                       reasonIfUnsupported);
+            }
+        }
+        case LayerType::Dequantize:
+            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Division:
+            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ElementwiseUnary:
+            return IsElementwiseUnarySupported(infos[0],
+                                               infos[1],
+                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
+                                               reasonIfUnsupported);
+        case LayerType::Fill:
+            return IsFillSupported(infos[0],
+                                   infos[1],
+                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+                                   reasonIfUnsupported);
+        case LayerType::Floor:
+            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::FullyConnected:
+            return IsFullyConnectedSupported(infos[0],
+                                             infos[1],
+                                             infos[2],
+                                             infos[3],
+                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Gather:
+            return IsGatherSupported(infos[0],
+                                     infos[1],
+                                     infos[2],
+                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Input:
+            return IsInputSupported(infos[0], reasonIfUnsupported);
+        case LayerType::InstanceNormalization:
+            return IsInstanceNormalizationSupported(infos[0],
+                                                    infos[1],
+                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+                                                        (&descriptor)),
+                                                    reasonIfUnsupported);
+        case LayerType::L2Normalization:
+            return IsL2NormalizationSupported(infos[0],
+                                              infos[1],
+                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
+                                              reasonIfUnsupported);
+        case LayerType::LogicalBinary:
+            return IsLogicalBinarySupported(infos[0],
+                                            infos[1],
+                                            infos[2],
+                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+                                            reasonIfUnsupported);
+        case LayerType::LogSoftmax:
+            return IsLogSoftmaxSupported(infos[0],
+                                         infos[1],
+                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Lstm:
+            return IsLstmSupported(infos[0],
+                                   infos[1],
+                                   infos[2],
+                                   infos[3],
+                                   infos[4],
+                                   infos[5],
+                                   infos[6],
+                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+                                   lstmParamsInfo.value(),
+                                   reasonIfUnsupported);
+        case LayerType::QLstm:
+            return IsQLstmSupported(infos[0],
+                                    infos[1],
+                                    infos[2],
+                                    infos[3],
+                                    infos[4],
+                                    infos[5],
+                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+                                    lstmParamsInfo.value(),
+                                    reasonIfUnsupported);
+        case LayerType::Maximum:
+            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Mean:
+            return IsMeanSupported(infos[0],
+                                   infos[1],
+                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+                                   reasonIfUnsupported);
+        case LayerType::Minimum:
+            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Multiplication:
+            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Normalization:
+            return IsNormalizationSupported(infos[0],
+                                            infos[1],
+                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+                                            reasonIfUnsupported);
+        case LayerType::Output:
+            return IsOutputSupported(infos[0], reasonIfUnsupported);
+        case LayerType::Pad:
+            return IsPadSupported(infos[0],
+                                  infos[1],
+                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+                                  reasonIfUnsupported);
+        case LayerType::Permute:
+            return IsPermuteSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::Pooling2d:
+            return IsPooling2dSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::Prelu:
+            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Quantize:
+            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::QuantizedLstm:
+            return IsQuantizedLstmSupported(infos[0],
+                                            infos[1],
+                                            infos[2],
+                                            infos[3],
+                                            infos[4],
+                                            quantizedLstmParamsInfo.value(),
+                                            reasonIfUnsupported);
+        case LayerType::Reshape:
+            return IsReshapeSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::Resize:
+            return IsResizeSupported(infos[0],
+                                     infos[1],
+                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Reduce:
+            return IsReduceSupported(infos[0],
+                                     infos[1],
+                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Slice:
+            return IsSliceSupported(infos[0],
+                                    infos[1],
+                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+                                    reasonIfUnsupported);
+        case LayerType::Softmax:
+            return IsSoftmaxSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::SpaceToBatchNd:
+            return IsSpaceToBatchNdSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::SpaceToDepth:
+            return IsSpaceToDepthSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::Splitter:
+        {
+            std::vector<TensorInfo> outputInfos;
+            for (uint32_t i = 1; i < infos.size(); i++)
+            {
+                outputInfos.push_back(infos[i]);
+            }
+            return IsSplitterSupported(infos[0],
+                                       {outputInfos.begin(), outputInfos.end()},
+                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+                                       reasonIfUnsupported);
+        }
+        case LayerType::Stack:
+        {
+            std::vector<const TensorInfo*> inputInfos;
+            for (uint32_t i = 0; i < infos.size() - 1; i++)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            return IsStackSupported(inputInfos,
+                                    infos[infos.size() - 1],
+                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+                                    reasonIfUnsupported);
+        }
+        case LayerType::StridedSlice:
+            return IsStridedSliceSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::Subtraction:
+            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Transpose:
+            return IsTransposeSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::TransposeConvolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsTransposeConvolution2dSupported(infos[0],
+                                                         infos[1],
+                                                         desc,
+                                                         infos[2],
+                                                         EmptyOptional(),
+                                                         reasonIfUnsupported);
+            }
+            else
+            {
+                return IsTransposeConvolution2dSupported(infos[0],
+                                                         infos[1],
+                                                         desc,
+                                                         infos[2],
+                                                         infos[3],
+                                                         reasonIfUnsupported);
+            }
+        }
+        case LayerType::Cast:
+            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ChannelShuffle:
+            return IsChannelShuffleSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Convolution3d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsConvolution3dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                EmptyOptional(),
+                                                reasonIfUnsupported);
+            }
+            else
+            {
+                return IsConvolution3dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                infos[3],
+                                                reasonIfUnsupported);
+            }
+        }
+        case LayerType::MemCopy:
+            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::MemImport:
+            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Map:
+            return true;
+        case LayerType::Unmap:
+            return true;
+        case LayerType::Merge:
+            return LayerSupportBase::IsMergeSupported(infos[0],
+                                                      infos[1],
+                                                      infos[2],
+                                                      reasonIfUnsupported);
+        case LayerType::Rank:
+            return true;
+        case LayerType::Shape:
+            return LayerSupportBase::IsShapeSupported(infos[0],
+                                                      infos[1],
+                                                      reasonIfUnsupported);
+        case LayerType::ConvertBf16ToFp32:
+            return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0],
+                                                                  infos[1],
+                                                                  reasonIfUnsupported);
+        case LayerType::ConvertFp32ToBf16:
+            return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0],
+                                                                  infos[1],
+                                                                  reasonIfUnsupported);
+        default:
+            // layers not supported in cl by default:
+            // debug, detectionpostprocess, fakequantization, precompiled,
+            // standin, switch, unidirectionalsequencelstm, pooling3d
+            return false;
+    }
+}
+
 bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ActivationDescriptor& descriptor,
@@ -286,7 +694,7 @@
 
 bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                        const TensorInfo& output,
-                                       const ConcatDescriptor& descriptor,
+                                       const OriginsDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
     if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index e75aeda..0300fc0 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -18,6 +18,13 @@
     ClLayerSupport();
     ~ClLayerSupport() {}
 
+    bool IsLayerSupported(const LayerType& type,
+                          const std::vector<TensorInfo>& infos,
+                          const BaseDescriptor& descriptor,
+                          const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+                          const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+                          Optional<std::string&> reasonIfUnsupported) const override;
+
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -64,7 +71,7 @@
 
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const ConcatDescriptor& descriptor,
+                           const OriginsDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConstantSupported(const TensorInfo& output,
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index d5dd238..2b2229a 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -7,7 +7,6 @@
 #include "NeonBackendId.hpp"
 #include "NeonBackendModelContext.hpp"
 
-#include <armnn/Descriptors.hpp>
 #include <armnn/Exceptions.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/Types.hpp>
@@ -146,6 +145,424 @@
 {
 }
 
+bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
+                                        const std::vector<TensorInfo>& infos,
+                                        const BaseDescriptor& descriptor,
+                                        const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+                                        const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    switch (type)
+    {
+        case LayerType::Activation:
+            return IsActivationSupported(infos[0],
+                                         infos[1],
+                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Addition:
+            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ArgMinMax:
+            return IsArgMinMaxSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::BatchNormalization:
+            return IsBatchNormalizationSupported(infos[0],
+                                                 infos[1],
+                                                 infos[2],
+                                                 infos[3],
+                                                 infos[4],
+                                                 infos[5],
+                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+                                                     (&descriptor)),
+                                                 reasonIfUnsupported);
+        case LayerType::BatchToSpaceNd:
+            return IsBatchToSpaceNdSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Comparison:
+            return IsComparisonSupported(infos[0],
+                                         infos[1],
+                                         infos[2],
+                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Concat:
+        {
+            std::vector<const TensorInfo*> inputInfos;
+            for (uint32_t i = 0; i < (infos.size() - 1); i++)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            return IsConcatSupported(inputInfos,
+                                     infos[infos.size() - 1],
+                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        }
+        case LayerType::Constant:
+            return IsConstantSupported(infos[0], reasonIfUnsupported);
+        case LayerType::ConvertBf16ToFp32:
+            return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ConvertFp16ToFp32:
+            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ConvertFp32ToBf16:
+            return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ConvertFp32ToFp16:
+            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Convolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsConvolution2dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                EmptyOptional(),
+                                                reasonIfUnsupported);
+            }
+            else
+            {
+                return IsConvolution2dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                infos[3],
+                                                reasonIfUnsupported);
+            }
+        }
+        case LayerType::DepthToSpace:
+            return IsDepthToSpaceSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::DepthwiseConvolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsDepthwiseConvolutionSupported(infos[0],
+                                                       infos[1],
+                                                       desc,
+                                                       infos[2],
+                                                       EmptyOptional(),
+                                                       reasonIfUnsupported);
+            }
+            else
+            {
+                return IsDepthwiseConvolutionSupported(infos[0],
+                                                       infos[1],
+                                                       desc,
+                                                       infos[2],
+                                                       infos[3],
+                                                       reasonIfUnsupported);
+            }
+        }
+        case LayerType::Dequantize:
+            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Division:
+            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ElementwiseUnary:
+            return IsElementwiseUnarySupported(infos[0],
+                                               infos[1],
+                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
+                                               reasonIfUnsupported);
+        case LayerType::Fill:
+            return IsFillSupported(infos[0],
+                                   infos[1],
+                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+                                   reasonIfUnsupported);
+        case LayerType::Floor:
+            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::FullyConnected:
+            return IsFullyConnectedSupported(infos[0],
+                                             infos[1],
+                                             infos[2],
+                                             infos[3],
+                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Gather:
+            return IsGatherSupported(infos[0],
+                                     infos[1],
+                                     infos[2],
+                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Input:
+            return IsInputSupported(infos[0], reasonIfUnsupported);
+        case LayerType::InstanceNormalization:
+            return IsInstanceNormalizationSupported(infos[0],
+                                                    infos[1],
+                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+                                                        (&descriptor)),
+                                                    reasonIfUnsupported);
+        case LayerType::L2Normalization:
+            return IsL2NormalizationSupported(infos[0],
+                                              infos[1],
+                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
+                                              reasonIfUnsupported);
+        case LayerType::LogicalBinary:
+            return IsLogicalBinarySupported(infos[0],
+                                            infos[1],
+                                            infos[2],
+                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+                                            reasonIfUnsupported);
+        case LayerType::LogSoftmax:
+            return IsLogSoftmaxSupported(infos[0],
+                                         infos[1],
+                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Lstm:
+            return IsLstmSupported(infos[0],
+                                   infos[1],
+                                   infos[2],
+                                   infos[3],
+                                   infos[4],
+                                   infos[5],
+                                   infos[6],
+                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+                                   lstmParamsInfo.value(),
+                                   reasonIfUnsupported);
+        case LayerType::QLstm:
+            return IsQLstmSupported(infos[0],
+                                    infos[1],
+                                    infos[2],
+                                    infos[3],
+                                    infos[4],
+                                    infos[5],
+                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+                                    lstmParamsInfo.value(),
+                                    reasonIfUnsupported);
+        case LayerType::Maximum:
+            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Mean:
+            return IsMeanSupported(infos[0],
+                                   infos[1],
+                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+                                   reasonIfUnsupported);
+        case LayerType::Minimum:
+            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Multiplication:
+            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Normalization:
+            return IsNormalizationSupported(infos[0],
+                                            infos[1],
+                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+                                            reasonIfUnsupported);
+        case LayerType::Output:
+            return IsOutputSupported(infos[0], reasonIfUnsupported);
+        case LayerType::Pad:
+            return IsPadSupported(infos[0],
+                                  infos[1],
+                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+                                  reasonIfUnsupported);
+        case LayerType::Permute:
+            return IsPermuteSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::Pooling2d:
+            return IsPooling2dSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::Prelu:
+            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Quantize:
+            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::QuantizedLstm:
+            return IsQuantizedLstmSupported(infos[0],
+                                            infos[1],
+                                            infos[2],
+                                            infos[3],
+                                            infos[4],
+                                            quantizedLstmParamsInfo.value(),
+                                            reasonIfUnsupported);
+        case LayerType::Reshape:
+            return IsReshapeSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::Resize:
+            return IsResizeSupported(infos[0],
+                                     infos[1],
+                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Reduce:
+            return IsReduceSupported(infos[0],
+                                     infos[1],
+                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Slice:
+            return IsSliceSupported(infos[0],
+                                    infos[1],
+                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+                                    reasonIfUnsupported);
+        case LayerType::Softmax:
+            return IsSoftmaxSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::SpaceToBatchNd:
+            return IsSpaceToBatchNdSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::SpaceToDepth:
+            return IsSpaceToDepthSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::Splitter:
+        {
+            std::vector<TensorInfo> outputInfos;
+            for (uint32_t i = 1; i < infos.size(); i++)
+            {
+                outputInfos.push_back(infos[i]);
+            }
+            return IsSplitterSupported(infos[0],
+                                       {outputInfos.begin(), outputInfos.end()},
+                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+                                       reasonIfUnsupported);
+        }
+        case LayerType::Stack:
+        {
+            std::vector<const TensorInfo*> inputInfos;
+            for (uint32_t i = 0; i < infos.size() - 1; i++)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            return IsStackSupported(inputInfos,
+                                    infos[infos.size() - 1],
+                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+                                    reasonIfUnsupported);
+        }
+        case LayerType::StridedSlice:
+            return IsStridedSliceSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::Subtraction:
+            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Transpose:
+            return IsTransposeSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::TransposeConvolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsTransposeConvolution2dSupported(infos[0],
+                                                         infos[1],
+                                                         desc,
+                                                         infos[2],
+                                                         EmptyOptional(),
+                                                         reasonIfUnsupported);
+            }
+            else
+            {
+                return IsTransposeConvolution2dSupported(infos[0],
+                                                         infos[1],
+                                                         desc,
+                                                         infos[2],
+                                                         infos[3],
+                                                         reasonIfUnsupported);
+            }
+        }
+        case LayerType::Cast:
+            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ChannelShuffle:
+            return IsChannelShuffleSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Convolution3d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsConvolution3dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                EmptyOptional(),
+                                                reasonIfUnsupported);
+            }
+            else
+            {
+                return IsConvolution3dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                infos[3],
+                                                reasonIfUnsupported);
+            }
+        }
+        case LayerType::MemCopy:
+            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::MemImport:
+            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::DetectionPostProcess:
+        {
+            auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
+            return LayerSupportBase::IsDetectionPostProcessSupported(infos[0],
+                                                                     infos[1],
+                                                                     infos[2],
+                                                                     infos[3],
+                                                                     infos[4],
+                                                                     infos[5],
+                                                                     infos[6],
+                                                                     desc,
+                                                                     reasonIfUnsupported);
+        }
+        case LayerType::Map:
+            return true;
+        case LayerType::Unmap:
+            return true;
+        case LayerType::Merge:
+            return LayerSupportBase::IsMergeSupported(infos[0],
+                                                      infos[1],
+                                                      infos[2],
+                                                      reasonIfUnsupported);
+        case LayerType::Rank:
+            return true;
+        case LayerType::Shape:
+            return LayerSupportBase::IsShapeSupported(infos[0],
+                                                      infos[1],
+                                                      reasonIfUnsupported);
+        default:
+            // layers not supported in neon by default:
+            // debug, fakequantization, precompiled, standin,
+            // switch, unidirectionalsequencelstm, pooling3d
+            return false;
+    }
+}
+
 bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ActivationDescriptor& descriptor,
@@ -256,7 +673,7 @@
 
 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                          const TensorInfo& output,
-                                         const ConcatDescriptor& descriptor,
+                                         const OriginsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
     if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 16507c5..afa9b41 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -19,6 +19,13 @@
 
     ~NeonLayerSupport() {}
 
+    bool IsLayerSupported(const LayerType& type,
+                          const std::vector<TensorInfo>& infos,
+                          const BaseDescriptor& descriptor,
+                          const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+                          const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+                          Optional<std::string&> reasonIfUnsupported) const override;
+
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -65,7 +72,7 @@
 
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const ConcatDescriptor& descriptor,
+                           const OriginsDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConstantSupported(const TensorInfo& output,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 4d4f014..f5798c8 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -7,9 +7,9 @@
 
 #include <armnn/TypesUtils.hpp>
 #include <armnn/Types.hpp>
-#include <armnn/Descriptors.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <LayerSupportCommon.hpp>
 #include <backendsCommon/LayerSupportRules.hpp>
@@ -58,6 +58,488 @@
 
 } // anonymous namespace
 
+bool RefLayerSupport::IsLayerSupported(const LayerType& type,
+                                       const std::vector<TensorInfo>& infos,
+                                       const BaseDescriptor& descriptor,
+                                       const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+                                       const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    switch (type)
+    {
+        case LayerType::Activation:
+            return IsActivationSupported(infos[0],
+                                         infos[1],
+                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Addition:
+            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ArgMinMax:
+            return IsArgMinMaxSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::BatchNormalization:
+            return IsBatchNormalizationSupported(infos[0],
+                                                 infos[1],
+                                                 infos[2],
+                                                 infos[3],
+                                                 infos[4],
+                                                 infos[5],
+                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+                                                     (&descriptor)),
+                                                 reasonIfUnsupported);
+        case LayerType::BatchToSpaceNd:
+            return IsBatchToSpaceNdSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Comparison:
+            return IsComparisonSupported(infos[0],
+                                         infos[1],
+                                         infos[2],
+                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Concat:
+        {
+            std::vector<const TensorInfo*> inputInfos;
+            for (uint32_t i = 0; i < (infos.size() - 1); i++)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            return IsConcatSupported(inputInfos,
+                                     infos[infos.size() - 1],
+                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        }
+        case LayerType::Constant:
+            return IsConstantSupported(infos[0], reasonIfUnsupported);
+        case LayerType::ConvertBf16ToFp32:
+            return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ConvertFp16ToFp32:
+            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ConvertFp32ToBf16:
+            return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ConvertFp32ToFp16:
+            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Convolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsConvolution2dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                EmptyOptional(),
+                                                reasonIfUnsupported);
+            }
+            else
+            {
+                return IsConvolution2dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                infos[3],
+                                                reasonIfUnsupported);
+            }
+        }
+        case LayerType::DepthToSpace:
+            return IsDepthToSpaceSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::DepthwiseConvolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsDepthwiseConvolutionSupported(infos[0],
+                                                       infos[1],
+                                                       desc,
+                                                       infos[2],
+                                                       EmptyOptional(),
+                                                       reasonIfUnsupported);
+            }
+            else
+            {
+                return IsDepthwiseConvolutionSupported(infos[0],
+                                                       infos[1],
+                                                       desc,
+                                                       infos[2],
+                                                       infos[3],
+                                                       reasonIfUnsupported);
+            }
+        }
+        case LayerType::Dequantize:
+            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Division:
+            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::ElementwiseUnary:
+            return IsElementwiseUnarySupported(infos[0],
+                                               infos[1],
+                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
+                                               reasonIfUnsupported);
+        case LayerType::Fill:
+            return IsFillSupported(infos[0],
+                                   infos[1],
+                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+                                   reasonIfUnsupported);
+        case LayerType::Floor:
+            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::FullyConnected:
+            return IsFullyConnectedSupported(infos[0],
+                                             infos[1],
+                                             infos[2],
+                                             infos[3],
+                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Gather:
+            return IsGatherSupported(infos[0],
+                                     infos[1],
+                                     infos[2],
+                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Input:
+            return IsInputSupported(infos[0], reasonIfUnsupported);
+        case LayerType::InstanceNormalization:
+            return IsInstanceNormalizationSupported(infos[0],
+                                                    infos[1],
+                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+                                                        (&descriptor)),
+                                                    reasonIfUnsupported);
+        case LayerType::L2Normalization:
+            return IsL2NormalizationSupported(infos[0],
+                                              infos[1],
+                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
+                                              reasonIfUnsupported);
+        case LayerType::LogicalBinary:
+            return IsLogicalBinarySupported(infos[0],
+                                            infos[1],
+                                            infos[2],
+                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+                                            reasonIfUnsupported);
+        case LayerType::LogSoftmax:
+            return IsLogSoftmaxSupported(infos[0],
+                                         infos[1],
+                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+                                         reasonIfUnsupported);
+        case LayerType::Lstm:
+            return IsLstmSupported(infos[0],
+                                   infos[1],
+                                   infos[2],
+                                   infos[3],
+                                   infos[4],
+                                   infos[5],
+                                   infos[6],
+                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+                                   lstmParamsInfo.value(),
+                                   reasonIfUnsupported);
+        case LayerType::QLstm:
+            return IsQLstmSupported(infos[0],
+                                    infos[1],
+                                    infos[2],
+                                    infos[3],
+                                    infos[4],
+                                    infos[5],
+                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+                                    lstmParamsInfo.value(),
+                                    reasonIfUnsupported);
+        case LayerType::Maximum:
+            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Mean:
+            return IsMeanSupported(infos[0],
+                                   infos[1],
+                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+                                   reasonIfUnsupported);
+        case LayerType::Minimum:
+            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Multiplication:
+            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Normalization:
+            return IsNormalizationSupported(infos[0],
+                                            infos[1],
+                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+                                            reasonIfUnsupported);
+        case LayerType::Output:
+            return IsOutputSupported(infos[0], reasonIfUnsupported);
+        case LayerType::Pad:
+            return IsPadSupported(infos[0],
+                                  infos[1],
+                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+                                  reasonIfUnsupported);
+        case LayerType::Permute:
+            return IsPermuteSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::Pooling2d:
+            return IsPooling2dSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::Prelu:
+            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Quantize:
+            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Reshape:
+            return IsReshapeSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::Resize:
+            return IsResizeSupported(infos[0],
+                                     infos[1],
+                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Reduce:
+            return IsReduceSupported(infos[0],
+                                     infos[1],
+                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+                                     reasonIfUnsupported);
+        case LayerType::Slice:
+            return IsSliceSupported(infos[0],
+                                    infos[1],
+                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+                                    reasonIfUnsupported);
+        case LayerType::Softmax:
+            return IsSoftmaxSupported(infos[0],
+                                      infos[1],
+                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+                                      reasonIfUnsupported);
+        case LayerType::SpaceToBatchNd:
+            return IsSpaceToBatchNdSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::SpaceToDepth:
+            return IsSpaceToDepthSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::Splitter:
+        {
+            std::vector<TensorInfo> outputInfos;
+            for (uint32_t i = 1; i < infos.size(); i++)
+            {
+                outputInfos.push_back(infos[i]);
+            }
+            return IsSplitterSupported(infos[0],
+                                       {outputInfos.begin(), outputInfos.end()},
+                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+                                       reasonIfUnsupported);
+        }
+        case LayerType::Stack:
+        {
+            std::vector<const TensorInfo*> inputInfos;
+            for (uint32_t i = 0; i < infos.size() - 1; i++)
+            {
+                inputInfos.push_back(&infos[i]);
+            }
+            return IsStackSupported(inputInfos,
+                                    infos[infos.size() - 1],
+                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+                                    reasonIfUnsupported);
+        }
+        case LayerType::StridedSlice:
+            return IsStridedSliceSupported(infos[0],
+                                           infos[1],
+                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+                                           reasonIfUnsupported);
+        case LayerType::Subtraction:
+            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::Transpose:
+            return IsTransposeSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::TransposeConvolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsTransposeConvolution2dSupported(infos[0],
+                                                         infos[1],
+                                                         desc,
+                                                         infos[2],
+                                                         EmptyOptional(),
+                                                         reasonIfUnsupported);
+            }
+            else
+            {
+                return IsTransposeConvolution2dSupported(infos[0],
+                                                         infos[1],
+                                                         desc,
+                                                         infos[2],
+                                                         infos[3],
+                                                         reasonIfUnsupported);
+            }
+        }
+        case LayerType::Cast:
+            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::ChannelShuffle:
+            return IsChannelShuffleSupported(infos[0],
+                                             infos[1],
+                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+                                             reasonIfUnsupported);
+        case LayerType::Convolution3d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                return IsConvolution3dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                EmptyOptional(),
+                                                reasonIfUnsupported);
+            }
+            else
+            {
+                return IsConvolution3dSupported(infos[0],
+                                                infos[1],
+                                                desc,
+                                                infos[2],
+                                                infos[3],
+                                                reasonIfUnsupported);
+            }
+        }
+        case LayerType::Debug:
+            return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::DetectionPostProcess:
+            return IsDetectionPostProcessSupported(infos[0],
+                                                   infos[1],
+                                                   infos[2],
+                                                   infos[3],
+                                                   infos[4],
+                                                   infos[5],
+                                                   infos[6],
+                                                   *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
+                                                       (&descriptor)),
+                                                   reasonIfUnsupported);
+        case LayerType::FakeQuantization:
+            return IsFakeQuantizationSupported(infos[0],
+                                               *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
+                                               reasonIfUnsupported);
+        case LayerType::MemCopy:
+            return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Rank:
+            return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Shape:
+            return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::UnidirectionalSequenceLstm:
+        {
+            if (infos.size() != 6)
+            {
+                throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
+                                               "should be of format: {input, outputStateIn, cellStateIn, "
+                                               "hiddenStateOutputVal, cellStateOutputVal, output}");
+            }
+            auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
+
+            bool isHiddenStateOutputOptional = (infos[4] == TensorInfo());
+            bool isCellStateOutput = (infos[5] == TensorInfo());
+            if (isHiddenStateOutputOptional && isCellStateOutput)
+            {
+                return IsUnidirectionalSequenceLstmSupported(infos[0],
+                                                             infos[1],
+                                                             infos[2],
+                                                             infos[3],
+                                                             EmptyOptional(),
+                                                             EmptyOptional(),
+                                                             desc,
+                                                             lstmParamsInfo.value(),
+                                                             reasonIfUnsupported);
+            }
+            else if (isHiddenStateOutputOptional)
+            {
+                return IsUnidirectionalSequenceLstmSupported(infos[0],
+                                                             infos[1],
+                                                             infos[2],
+                                                             infos[3],
+                                                             EmptyOptional(),
+                                                             infos[5],
+                                                             desc,
+                                                             lstmParamsInfo.value(),
+                                                             reasonIfUnsupported);
+            }
+            else if (isCellStateOutput)
+            {
+                return IsUnidirectionalSequenceLstmSupported(infos[0],
+                                                             infos[1],
+                                                             infos[2],
+                                                             infos[3],
+                                                             infos[4],
+                                                             EmptyOptional(),
+                                                             desc,
+                                                             lstmParamsInfo.value(),
+                                                             reasonIfUnsupported);
+            }
+            else
+            {
+                return IsUnidirectionalSequenceLstmSupported(infos[0],
+                                                             infos[1],
+                                                             infos[2],
+                                                             infos[3],
+                                                             infos[4],
+                                                             infos[5],
+                                                             desc,
+                                                             lstmParamsInfo.value(),
+                                                             reasonIfUnsupported);
+            }
+        }
+        case LayerType::Pooling3d:
+            return IsPooling3dSupported(infos[0],
+                                        infos[1],
+                                        *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
+                                        reasonIfUnsupported);
+        case LayerType::Map:
+            return true;
+        case LayerType::Unmap:
+            return true;
+        case LayerType::MemImport:
+            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+        case LayerType::Merge:
+            return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+        case LayerType::QuantizedLstm:
+            return LayerSupportBase::IsQuantizedLstmSupported(infos[0],
+                                                              infos[1],
+                                                              infos[2],
+                                                              infos[3],
+                                                              infos[4],
+                                                              quantizedLstmInputParamsInfo.value(),
+                                                              reasonIfUnsupported);
+        default:
+            // layers not supported in reference by default:
+            // precompiled, standin, switch
+            return false;
+    }
+}
+
 bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
@@ -391,7 +873,7 @@
 
 bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
-                                        const ConcatDescriptor& descriptor,
+                                        const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     IgnoreUnused(descriptor);
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 61d0556..b787d25 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -12,6 +12,13 @@
 class RefLayerSupport : public LayerSupportBase
 {
 public:
+    bool IsLayerSupported(const LayerType& type,
+                          const std::vector<TensorInfo>& infos,
+                          const BaseDescriptor& descriptor,
+                          const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+                          const Optional<QuantizedLstmInputParamsInfo>&,
+                          Optional<std::string&> reasonIfUnsupported) const override;
+
     bool IsActivationSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const ActivationDescriptor& descriptor,
@@ -58,7 +65,7 @@
 
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const ConcatDescriptor& descriptor,
+                           const OriginsDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConstantSupported(const TensorInfo& output,