IVGCVSW-2003: Get rid of the Is*SupportedNeon functions in favor of the ILayerSupport interface

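Move the implementations of the free Is*SupportedNeon() functions into
the corresponding NeonLayerSupport::Is*Supported() overrides of the
ILayerSupport interface, and make the remaining helpers (such as
IsNeonBackendSupported()) local to NeonLayerSupport.cpp via an
anonymous namespace. IsNeonDirectConvolutionPreferred() stays a free
function for now.

Callers now query layer support through a NeonLayerSupport instance
rather than the free functions, as in the updated tests:

    armnn::NeonLayerSupport layerSupport;
    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(
        inputInfo, outputInfo, descriptor, weightsInfo3x3, biasesInfo));
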
Change-Id: I03985ff678acf9393680340638a2e1f425b9966f
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index a044e04..99e2278 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -5,12 +5,11 @@
 
 #include "NeonLayerSupport.hpp"
 
-#include <InternalTypes.hpp>
-#include <LayerSupportCommon.hpp>
-
 #include <armnn/Descriptors.hpp>
-#include <armnn/Types.hpp>
+#include <armnn/InternalTypes.hpp>
+#include <armnn/LayerSupportCommon.hpp>
 #include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
 
 #include <boost/core/ignore_unused.hpp>
 
@@ -35,357 +34,8 @@
 namespace armnn
 {
 
-bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
-                                             const TensorInfo& output,
-                                             const ActivationDescriptor& descriptor,
-                                             Optional<std::string&> reasonIfUnsupported) const
+namespace
 {
-    return armnn::IsActivationSupportedNeon(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
-                                           const TensorInfo& input1,
-                                           const TensorInfo& output,
-                                           Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsAdditionSupportedNeon(input0, input1, output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
-                                                     const TensorInfo& output,
-                                                     const TensorInfo& mean,
-                                                     const TensorInfo& var,
-                                                     const TensorInfo& beta,
-                                                     const TensorInfo& gamma,
-                                                     const BatchNormalizationDescriptor& descriptor,
-                                                     Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsBatchNormalizationSupportedNeon(input,
-                                                    output,
-                                                    mean,
-                                                    var,
-                                                    beta,
-                                                    gamma,
-                                                    descriptor,
-                                                    reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
-                                           Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsConstantSupportedNeon(output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
-                                                    const TensorInfo& output,
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsConvertFp16ToFp32SupportedNeon(input, output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
-                                                    const TensorInfo& output,
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsConvertFp32ToFp16SupportedNeon(input, output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
-                                                const TensorInfo& output,
-                                                const Convolution2dDescriptor& descriptor,
-                                                const TensorInfo& weights,
-                                                const Optional<TensorInfo>& biases,
-                                                Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsConvolution2dSupportedNeon(input,
-                                               output,
-                                               descriptor,
-                                               weights,
-                                               biases,
-                                               reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
-                                                       const TensorInfo& output,
-                                                       const DepthwiseConvolution2dDescriptor& descriptor,
-                                                       const TensorInfo& weights,
-                                                       const Optional<TensorInfo>& biases,
-                                                       Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsDepthwiseConvolutionSupportedNeon(input,
-                                                      output,
-                                                      descriptor,
-                                                      weights,
-                                                      biases,
-                                                      reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
-                                           const TensorInfo& input1,
-                                           const TensorInfo& output,
-                                           Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsDivisionSupportedNeon(input0, input1, output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
-                                                   const FakeQuantizationDescriptor& descriptor,
-                                                   Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsFakeQuantizationSupportedNeon(input, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
-                                        const TensorInfo& output,
-                                        Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsFloorSupportedNeon(input, output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 const TensorInfo& weights,
-                                                 const TensorInfo& biases,
-                                                 const FullyConnectedDescriptor& descriptor,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsFullyConnectedSupportedNeon(input,
-                                                output,
-                                                weights,
-                                                biases,
-                                                descriptor,
-                                                reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
-                                        Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsInputSupportedNeon(input, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
-                                                  const TensorInfo& output,
-                                                  const L2NormalizationDescriptor& descriptor,
-                                                  Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsL2NormalizationSupportedNeon(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
-                                       const TensorInfo& outputStateIn,
-                                       const TensorInfo& cellStateIn,
-                                       const TensorInfo& scratchBuffer,
-                                       const TensorInfo& outputStateOut,
-                                       const TensorInfo& cellStateOut,
-                                       const TensorInfo& output,
-                                       const LstmDescriptor& descriptor,
-                                       const TensorInfo& inputToForgetWeights,
-                                       const TensorInfo& inputToCellWeights,
-                                       const TensorInfo& inputToOutputWeights,
-                                       const TensorInfo& recurrentToForgetWeights,
-                                       const TensorInfo& recurrentToCellWeights,
-                                       const TensorInfo& recurrentToOutputWeights,
-                                       const TensorInfo& forgetGateBias,
-                                       const TensorInfo& cellBias,
-                                       const TensorInfo& outputGateBias,
-                                       const TensorInfo* inputToInputWeights,
-                                       const TensorInfo* recurrentToInputWeights,
-                                       const TensorInfo* cellToInputWeights,
-                                       const TensorInfo* inputGateBias,
-                                       const TensorInfo* projectionWeights,
-                                       const TensorInfo* projectionBias,
-                                       const TensorInfo* cellToForgetWeights,
-                                       const TensorInfo* cellToOutputWeights,
-                                       Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsLstmSupportedNeon(input,
-                                      outputStateIn,
-                                      cellStateIn,
-                                      scratchBuffer,
-                                      outputStateOut,
-                                      cellStateOut,
-                                      output,
-                                      descriptor,
-                                      inputToForgetWeights,
-                                      inputToCellWeights,
-                                      inputToOutputWeights,
-                                      recurrentToForgetWeights,
-                                      recurrentToCellWeights,
-                                      recurrentToOutputWeights,
-                                      forgetGateBias,
-                                      cellBias,
-                                      outputGateBias,
-                                      inputToInputWeights,
-                                      recurrentToInputWeights,
-                                      cellToInputWeights,
-                                      inputGateBias,
-                                      projectionWeights,
-                                      projectionBias,
-                                      cellToForgetWeights,
-                                      cellToOutputWeights,
-                                      reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
-                                       const TensorInfo& output,
-                                       const MeanDescriptor& descriptor,
-                                       Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsMeanSupportedNeon(input, output, descriptor,reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
-                                         const OriginsDescriptor& descriptor,
-                                         Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsMergerSupportedNeon(inputs, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
-                                                 const TensorInfo& input1,
-                                                 const TensorInfo& output,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsMultiplicationSupportedNeon(input0, input1, output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
-                                                const TensorInfo& output,
-                                                const NormalizationDescriptor& descriptor,
-                                                Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsNormalizationSupportedNeon(input,
-                                               output,
-                                               descriptor,
-                                               reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
-                                         Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsOutputSupportedNeon(output, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      const PadDescriptor& descriptor,
-                                      Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsPadSupportedNeon(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
-                                          const TensorInfo& output,
-                                          const PermuteDescriptor& descriptor,
-                                          Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsPermuteSupportedNeon(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
-                                            const TensorInfo& output,
-                                            const Pooling2dDescriptor& descriptor,
-                                            Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsPooling2dSupportedNeon(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
-                                          Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsReshapeSupportedNeon(input, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
-                                                 Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsResizeBilinearSupportedNeon(input, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
-                                          const TensorInfo& output,
-                                          const SoftmaxDescriptor& descriptor,
-                                          Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsSoftmaxSupportedNeon(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
-                                           const ViewsDescriptor& descriptor,
-                                           Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsSplitterSupportedNeon(input, descriptor, reasonIfUnsupported);
-}
-
-bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
-                                              const TensorInfo& input1,
-                                              const TensorInfo& output,
-                                              Optional<std::string&> reasonIfUnsupported) const
-{
-    return armnn::IsSubtractionSupportedNeon(input0, input1, output, reasonIfUnsupported);
-}
-
-//
-// Implementation functions
-//
-// TODO: Functions kept for backward compatibility. Remove once transition to plugable backends is complete!
-
-bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
-{
-    // See arm_compute::NEDirectConvolutionLayer documentation for the supported cases,
-    // and complement with NEDirectConvolutionLayerKernel::configure() implementation.
-
-    // Only 1x1 is using direct convolution. Performance results and details are in:
-    //    https://jira.arm.com/browse/IVGCVSW-1003
-    // Measurements were taken as of clframework: f105ab972135bcd21304883eff040d7e587099bc
-
-    const bool dataTypeSupported = (weightInfo.GetDataType() == armnn::DataType::Float32);
-
-    // Strides: 1|2|3
-    const bool strideSupported = (desc.m_StrideX == 1 || desc.m_StrideX == 2 || desc.m_StrideX == 3) &&
-                                 (desc.m_StrideY == 1 || desc.m_StrideY == 2 || desc.m_StrideY == 3);
-
-    auto paddingLargerThan = [](const Convolution2dDescriptor& conv2ddesc, unsigned int value)
-    {
-        return conv2ddesc.m_PadLeft > value || conv2ddesc.m_PadRight > value ||
-               conv2ddesc.m_PadTop > value || conv2ddesc.m_PadBottom > value;
-    };
-
-    // Supported sizes and padding.
-    const bool sizeAndPaddingSupported =
-        // Pad > 0 not supported for 1x1 weights.
-        (weightInfo.GetShape()[2] == 1 && weightInfo.GetShape()[3] == 1 && !paddingLargerThan(desc, 0u));
-
-    const bool preferDirectConvolution = dataTypeSupported &&
-                                         strideSupported &&
-                                         sizeAndPaddingSupported &&
-                                         // NEDirectConvolutionLayerKernel doesn't support NULL bias.
-                                         desc.m_BiasEnabled;
-    return preferDirectConvolution;
-}
-
-bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
-                                            const NormalizationDescriptor& parameters)
-{
-    if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
-    {
-        if (reasonIfUnsupported)
-        {
-            reasonIfUnsupported.value() = "Unsupported normalisation method type, only LocalBrightness is supported";
-        }
-        return false;
-    }
-    if (parameters.m_NormSize % 2 == 0)
-    {
-        if (reasonIfUnsupported)
-        {
-            reasonIfUnsupported.value() = "Normalization size must be an odd number.";
-        }
-        return false;
-    }
-
-    return true;
-}
 
 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
 {
@@ -436,10 +86,12 @@
     return IsNeonBackendSupported(reasonIfUnsupported);
 #endif
 
-bool IsActivationSupportedNeon(const TensorInfo& input,
-                               const TensorInfo& output,
-                               const ActivationDescriptor& descriptor,
-                               Optional<std::string&> reasonIfUnsupported)
+} // anonymous namespace
+
+bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const ActivationDescriptor& descriptor,
+                                             Optional<std::string&> reasonIfUnsupported) const
 {
     ignore_unused(descriptor);
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
@@ -449,10 +101,10 @@
                                    descriptor);
 }
 
-bool IsAdditionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
+                                           const TensorInfo& input1,
+                                           const TensorInfo& output,
+                                           Optional<std::string&> reasonIfUnsupported) const
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -461,14 +113,14 @@
                                    output);
 }
 
-bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
-                                       const TensorInfo& output,
-                                       const TensorInfo& mean,
-                                       const TensorInfo& var,
-                                       const TensorInfo& beta,
-                                       const TensorInfo& gamma,
-                                       const BatchNormalizationDescriptor& descriptor,
-                                       Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
+                                                     const TensorInfo& output,
+                                                     const TensorInfo& mean,
+                                                     const TensorInfo& var,
+                                                     const TensorInfo& beta,
+                                                     const TensorInfo& gamma,
+                                                     const BatchNormalizationDescriptor& descriptor,
+                                                     Optional<std::string&> reasonIfUnsupported) const
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                    reasonIfUnsupported,
@@ -481,8 +133,8 @@
                                    descriptor);
 }
 
-bool IsConstantSupportedNeon(const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
+                                           Optional<std::string&> reasonIfUnsupported) const
 {
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                       output.GetDataType(),
@@ -490,12 +142,32 @@
                                       &TrueFunc<>);
 }
 
-bool IsConvolution2dSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const Convolution2dDescriptor& descriptor,
-                                  const TensorInfo& weights,
-                                  const Optional<TensorInfo>& biases,
-                                  Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return true;
+}
+
+bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return true;
+}
+
+bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const Convolution2dDescriptor& descriptor,
+                                                const TensorInfo& weights,
+                                                const Optional<TensorInfo>& biases,
+                                                Optional<std::string&> reasonIfUnsupported) const
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                    reasonIfUnsupported,
@@ -506,12 +178,12 @@
                                    biases);
 }
 
-bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
-                                         const TensorInfo& output,
-                                         const DepthwiseConvolution2dDescriptor& descriptor,
-                                         const TensorInfo& weights,
-                                         const Optional<TensorInfo>& biases,
-                                         Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                       const TensorInfo& output,
+                                                       const DepthwiseConvolution2dDescriptor& descriptor,
+                                                       const TensorInfo& weights,
+                                                       const Optional<TensorInfo>& biases,
+                                                       Optional<std::string&> reasonIfUnsupported) const
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -522,12 +194,11 @@
                                    biases);
 }
 
-bool IsDivisionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+                                           const TensorInfo& input1,
+                                           const TensorInfo& output,
+                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    // At the moment division is not supported
     ignore_unused(input0);
     ignore_unused(input1);
     ignore_unused(output);
@@ -535,24 +206,35 @@
     return false;
 }
 
-bool IsSubtractionSupportedNeon(const TensorInfo& input0,
-                                const TensorInfo& input1,
-                                const TensorInfo& output,
-                                Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
+                                                   const FakeQuantizationDescriptor& descriptor,
+                                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input0,
-                                   input1,
-                                   output);
+    ignore_unused(input);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
+    return false;
 }
 
-bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   const TensorInfo& weights,
-                                   const TensorInfo& biases,
-                                   const FullyConnectedDescriptor& descriptor,
-                                   Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(output);
+    return IsNeonBackendSupported(reasonIfUnsupported) &&
+           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
+                                         input.GetDataType(),
+                                         &FalseFuncF16<>,
+                                         &TrueFunc<>,
+                                         &FalseFuncU8<>);
+}
+
+bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const TensorInfo& weights,
+                                                 const TensorInfo& biases,
+                                                 const FullyConnectedDescriptor& descriptor,
+                                                 Optional<std::string&> reasonIfUnsupported) const
 {
     // At the moment U8 is unsupported
     if (input.GetDataType() == DataType::QuantisedAsymm8)
@@ -568,8 +250,8 @@
                                    descriptor);
 }
 
-bool IsInputSupportedNeon(const TensorInfo& input,
-                          Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
+                                        Optional<std::string&> reasonIfUnsupported) const
 {
     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                       input.GetDataType(),
@@ -577,155 +259,40 @@
                                       &TrueFunc<>);
 }
 
-bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
-                                    const TensorInfo& output,
-                                    const L2NormalizationDescriptor& descriptor,
-                                    Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
+                                                  const TensorInfo& output,
+                                                  const L2NormalizationDescriptor& descriptor,
+                                                  Optional<std::string&> reasonIfUnsupported) const
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
-bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
-                           const OriginsDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(descriptor);
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      inputs[0]->GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
-                                   const TensorInfo& input1,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input0,
-                                   input1,
-                                   output);
-}
-
-bool IsNormalizationSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const NormalizationDescriptor& descriptor,
-                                  Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsOutputSupportedNeon(const TensorInfo& output,
-                           Optional<std::string&> reasonIfUnsupported)
-{
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      output.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsPermuteSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const PermuteDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsPooling2dSupportedNeon(const TensorInfo& input,
-                              const TensorInfo& output,
-                              const Pooling2dDescriptor& descriptor,
-                              Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
-                                   Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-bool IsSoftmaxSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const SoftmaxDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsSplitterSupportedNeon(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(descriptor);
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
-                                     const FakeQuantizationDescriptor& descriptor,
-                                     Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(descriptor);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-bool IsReshapeSupportedNeon(const TensorInfo& input,
-                            Optional<std::string&> reasonIfUnsupported)
-{
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsFloorSupportedNeon(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(output);
-    return IsNeonBackendSupported(reasonIfUnsupported) &&
-           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
-                                         input.GetDataType(),
-                                         &FalseFuncF16<>,
-                                         &TrueFunc<>,
-                                         &FalseFuncU8<>);
-}
-
-bool IsLstmSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& outputStateIn,
-                         const TensorInfo& cellStateIn,
-                         const TensorInfo& scratchBuffer,
-                         const TensorInfo& outputStateOut,
-                         const TensorInfo& cellStateOut,
-                         const TensorInfo& output,
-                         const LstmDescriptor& descriptor,
-                         const TensorInfo& inputToForgetWeights,
-                         const TensorInfo& inputToCellWeights,
-                         const TensorInfo& inputToOutputWeights,
-                         const TensorInfo& recurrentToForgetWeights,
-                         const TensorInfo& recurrentToCellWeights,
-                         const TensorInfo& recurrentToOutputWeights,
-                         const TensorInfo& forgetGateBias,
-                         const TensorInfo& cellBias,
-                         const TensorInfo& outputGateBias,
-                         const TensorInfo* inputToInputWeights,
-                         const TensorInfo* recurrentToInputWeights,
-                         const TensorInfo* cellToInputWeights,
-                         const TensorInfo* inputGateBias,
-                         const TensorInfo* projectionWeights,
-                         const TensorInfo* projectionBias,
-                         const TensorInfo* cellToForgetWeights,
-                         const TensorInfo* cellToOutputWeights,
-                         Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
+                                       const TensorInfo& outputStateIn,
+                                       const TensorInfo& cellStateIn,
+                                       const TensorInfo& scratchBuffer,
+                                       const TensorInfo& outputStateOut,
+                                       const TensorInfo& cellStateOut,
+                                       const TensorInfo& output,
+                                       const LstmDescriptor& descriptor,
+                                       const TensorInfo& inputToForgetWeights,
+                                       const TensorInfo& inputToCellWeights,
+                                       const TensorInfo& inputToOutputWeights,
+                                       const TensorInfo& recurrentToForgetWeights,
+                                       const TensorInfo& recurrentToCellWeights,
+                                       const TensorInfo& recurrentToOutputWeights,
+                                       const TensorInfo& forgetGateBias,
+                                       const TensorInfo& cellBias,
+                                       const TensorInfo& outputGateBias,
+                                       const TensorInfo* inputToInputWeights,
+                                       const TensorInfo* recurrentToInputWeights,
+                                       const TensorInfo* cellToInputWeights,
+                                       const TensorInfo* inputGateBias,
+                                       const TensorInfo* projectionWeights,
+                                       const TensorInfo* projectionBias,
+                                       const TensorInfo* cellToForgetWeights,
+                                       const TensorInfo* cellToOutputWeights,
+                                       Optional<std::string&> reasonIfUnsupported) const
 {
     ignore_unused(input);
     ignore_unused(outputStateIn);
@@ -756,30 +323,10 @@
     return false;
 }
 
-bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(reasonIfUnsupported);
-    return true;
-}
-
-bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(reasonIfUnsupported);
-    return true;
-}
-
-bool IsMeanSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& output,
-                         const MeanDescriptor& descriptor,
-                         Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const MeanDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported) const
 {
     ignore_unused(input);
     ignore_unused(output);
@@ -788,10 +335,54 @@
     return false;
 }
 
-bool IsPadSupportedNeon(const TensorInfo& input,
-                        const TensorInfo& output,
-                        const PadDescriptor& descriptor,
-                        Optional<std::string&> reasonIfUnsupported)
+bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                         const OriginsDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      inputs[0]->GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
+}
+
+bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
+bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const NormalizationDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
+bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      output.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
+}
+
+bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const PadDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported) const
 {
     ignore_unused(input);
     ignore_unused(output);
@@ -800,4 +391,102 @@
     return false;
 }
 
+bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const PermuteDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
+
+bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const Pooling2dDescriptor& descriptor,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
+}
+
+bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      input.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
+}
+
+bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input);
+    ignore_unused(reasonIfUnsupported);
+    return false;
+}
+
+bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const SoftmaxDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
+}
+
+bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                           const ViewsDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      input.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
+}
+
+bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
+                                              const TensorInfo& input1,
+                                              const TensorInfo& output,
+                                              Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
+bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
+{
+    // See the arm_compute::NEDirectConvolutionLayer documentation for the supported cases,
+    // and complement it with the NEDirectConvolutionLayerKernel::configure() implementation.
+
+    // Only 1x1 convolutions currently use direct convolution. Performance results and details are in:
+    //    https://jira.arm.com/browse/IVGCVSW-1003
+    // Measurements were taken as of clframework: f105ab972135bcd21304883eff040d7e587099bc
+
+    const bool dataTypeSupported = (weightInfo.GetDataType() == armnn::DataType::Float32);
+
+    // Strides: 1|2|3
+    const bool strideSupported = (desc.m_StrideX == 1 || desc.m_StrideX == 2 || desc.m_StrideX == 3) &&
+                                 (desc.m_StrideY == 1 || desc.m_StrideY == 2 || desc.m_StrideY == 3);
+
+    auto paddingLargerThan = [](const Convolution2dDescriptor& conv2ddesc, unsigned int value)
+    {
+        return conv2ddesc.m_PadLeft > value || conv2ddesc.m_PadRight > value ||
+               conv2ddesc.m_PadTop > value || conv2ddesc.m_PadBottom > value;
+    };
+
+    // Supported sizes and padding.
+    const bool sizeAndPaddingSupported =
+        // Pad > 0 not supported for 1x1 weights.
+        (weightInfo.GetShape()[2] == 1 && weightInfo.GetShape()[3] == 1 && !paddingLargerThan(desc, 0u));
+
+    const bool preferDirectConvolution = dataTypeSupported &&
+                                         strideSupported &&
+                                         sizeAndPaddingSupported &&
+                                         // NEDirectConvolutionLayerKernel doesn't support NULL bias.
+                                         desc.m_BiasEnabled;
+    return preferDirectConvolution;
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 1223ba8..5e80ab8 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -167,172 +167,8 @@
                                 const TensorInfo& input1,
                                 const TensorInfo& output,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-};
+}; // class NeonLayerSupport
 
 bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
 
-bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
-                                            const NormalizationDescriptor& parameters);
-
-bool IsActivationSupportedNeon(const TensorInfo& input,
-                               const TensorInfo& output,
-                               const ActivationDescriptor& descriptor,
-                               Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsNeonDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
-                                                     const DepthwiseConvolution2dDescriptor& parameters,
-                                                     const TensorInfo& weights);
-
-bool IsAdditionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
-                                       const TensorInfo& output,
-                                       const TensorInfo& mean,
-                                       const TensorInfo& var,
-                                       const TensorInfo& beta,
-                                       const TensorInfo& gamma,
-                                       const BatchNormalizationDescriptor& descriptor,
-                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConstantSupportedNeon(const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvolution2dSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const Convolution2dDescriptor& descriptor,
-                                  const TensorInfo& weights,
-                                  const Optional<TensorInfo>& biases,
-                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-
-bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
-                                         const TensorInfo& output,
-                                         const DepthwiseConvolution2dDescriptor& descriptor,
-                                         const TensorInfo& weights,
-                                         const Optional<TensorInfo>& biases,
-                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsDivisionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSubtractionSupportedNeon(const TensorInfo& input0,
-                                const TensorInfo& input1,
-                                const TensorInfo& output,
-                                Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   const TensorInfo& weights,
-                                   const TensorInfo& biases,
-                                   const FullyConnectedDescriptor& descriptor,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsInputSupportedNeon(const TensorInfo& input,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
-                                    const TensorInfo& output,
-                                    const L2NormalizationDescriptor& descriptor,
-                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
-                           const OriginsDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
-                                   const TensorInfo& input1,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsNormalizationSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const NormalizationDescriptor& descriptor,
-                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsOutputSupportedNeon(const TensorInfo& output,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPermuteSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const PermuteDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPooling2dSupportedNeon(const TensorInfo& input,
-                              const TensorInfo& output,
-                              const Pooling2dDescriptor& descriptor,
-                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSoftmaxSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const SoftmaxDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSplitterSupportedNeon(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
-                                     const FakeQuantizationDescriptor& descriptor,
-                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsReshapeSupportedNeon(const TensorInfo& input,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFloorSupportedNeon(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsLstmSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& outputStateIn,
-                         const TensorInfo& cellStateIn,
-                         const TensorInfo& scratchBuffer,
-                         const TensorInfo& outputStateOut,
-                         const TensorInfo& cellStateOut,
-                         const TensorInfo& output,
-                         const LstmDescriptor& descriptor,
-                         const TensorInfo& inputToForgetWeights,
-                         const TensorInfo& inputToCellWeights,
-                         const TensorInfo& inputToOutputWeights,
-                         const TensorInfo& recurrentToForgetWeights,
-                         const TensorInfo& recurrentToCellWeights,
-                         const TensorInfo& recurrentToOutputWeights,
-                         const TensorInfo& forgetGateBias,
-                         const TensorInfo& cellBias,
-                         const TensorInfo& outputGateBias,
-                         const TensorInfo* inputToInputWeights,
-                         const TensorInfo* recurrentToInputWeights,
-                         const TensorInfo* cellToInputWeights,
-                         const TensorInfo* inputGateBias,
-                         const TensorInfo* projectionWeights,
-                         const TensorInfo* projectionBias,
-                         const TensorInfo* cellToForgetWeights,
-                         const TensorInfo* cellToOutputWeights,
-                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMeanSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& output,
-                         const MeanDescriptor& descriptor,
-                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPadSupportedNeon(const TensorInfo& input,
-                        const TensorInfo& output,
-                        const PadDescriptor& descriptor,
-                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
 } // namespace armnn
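
Note: with the free functions above removed, callers now go through the ILayerSupport
interface instead. A minimal sketch of the new call pattern (the checker function is
hypothetical and assumes NeonLayerSupport.hpp and the armnn headers are on the include
path, as in the sources touched by this patch):

    #include "NeonLayerSupport.hpp"

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>

    #include <string>

    bool CheckSoftmaxOnNeon(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
    {
        armnn::NeonLayerSupport layerSupport;
        std::string reason;
        // reasonIfUnsupported is an optional out-parameter; it receives a
        // human-readable explanation when support is denied.
        return layerSupport.IsSoftmaxSupported(input,
                                               output,
                                               armnn::SoftmaxDescriptor(),
                                               armnn::Optional<std::string&>(reason));
    }
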
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 36138b3..31ee7d8 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -154,78 +154,79 @@
     armnn::TensorInfo biasesInfo;
 
     armnn::DepthwiseConvolution2dDescriptor descriptor;
+    armnn::NeonLayerSupport layerSupport;
 
     // Strides supported: 1,2,3
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     // Supported stride 4
     descriptor = MakeDepthwiseConv2dDesc(4, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     // Supported weights shape 1x1
     armnn::TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, armnn::DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo1x1, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo1x1, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo1x1, biasesInfo));
 
     // Supported shape 2x2
     armnn::TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, armnn::DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo2x2, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo2x2, biasesInfo));
 
     // Asymmetric padding
     descriptor = MakeDepthwiseConv2dDesc(1, 1, 1, 1, 2, 1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 }
 
 // Pooling
@@ -298,7 +299,8 @@
     const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32);
 
     // 4D Softmax should be reported as unsupported on the NEON backend
-    BOOST_TEST(!armnn::IsSoftmaxSupportedNeon(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
+    armnn::NeonLayerSupport layerSupport;
+    BOOST_TEST(!layerSupport.IsSoftmaxSupported(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
 }
 
 // Splitter
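
Note: the depthwise-convolution stride checks in the hunk above repeat the same
three-line pattern for every (strideX, strideY) pair. If further cleanup is wanted,
the 3x3 grid of supported strides could be collapsed into a loop; a hypothetical
sketch, not part of this patch, reusing the test's existing MakeDepthwiseConv2dDesc
and CreateOutputTensorInfo helpers:

    armnn::NeonLayerSupport layerSupport;
    for (uint32_t strideX : {1u, 2u, 3u})
    {
        for (uint32_t strideY : {1u, 2u, 3u})
        {
            descriptor = MakeDepthwiseConv2dDesc(strideX, strideY);
            outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
            BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                                    weightsInfo3x3, biasesInfo));
        }
    }
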
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 0deff79..1894048 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -13,6 +13,34 @@
 namespace armnn
 {
 
+namespace
+{
+
+bool IsNeonNormalizationDescriptorSupported(const NormalizationDescriptor& parameters,
+                                            Optional<std::string&> reasonIfUnsupported)
+{
+    if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
+    {
+        if (reasonIfUnsupported)
+        {
+            reasonIfUnsupported.value() = "Unsupported normalization method type, only LocalBrightness is supported.";
+        }
+        return false;
+    }
+    if (parameters.m_NormSize % 2 == 0)
+    {
+        if (reasonIfUnsupported)
+        {
+            reasonIfUnsupported.value() = "Normalization size must be an odd number.";
+        }
+        return false;
+    }
+
+    return true;
+}
+
+} // anonymous namespace
+
 arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const NormalizationDescriptor& descriptor)
@@ -33,7 +61,7 @@
 {
     m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
     std::string reasonIfUnsupported;
-    if (!IsNeonNormalizationDescParamsSupported(Optional<std::string&>(reasonIfUnsupported), m_Data.m_Parameters))
+    if (!IsNeonNormalizationDescriptorSupported(m_Data.m_Parameters, Optional<std::string&>(reasonIfUnsupported)))
     {
         throw UnimplementedException(reasonIfUnsupported);
     }
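
Note: the renamed helper keeps the Optional<std::string&> convention used throughout
the layer-support code: the caller owns the string, passes a reference wrapped in an
Optional, and the callee writes a reason only when one was requested. A minimal sketch
of the calling side, mirroring the constructor hunk above (only valid inside this
translation unit, since the helper lives in an anonymous namespace):

    armnn::NormalizationDescriptor parameters;
    parameters.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalContrast; // not LocalBrightness
    parameters.m_NormSize = 3; // odd, so the size check passes

    std::string reason;
    if (!IsNeonNormalizationDescriptorSupported(parameters, armnn::Optional<std::string&>(reason)))
    {
        // reason now explains the failure, e.g. the unsupported-method message above.
        throw armnn::UnimplementedException(reason);
    }
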