IVGCVSW-6641 Stabilize the IWorkloadFactory interface with unified strategy

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia941be9bf2c15fe56e49a9b9a2bbe943a8152438
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index 7406547..2114158 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -27,6 +27,8 @@
     std::vector<ITensorHandle*> m_Outputs;
     void* m_AdditionalInfoObject;
 
+    virtual ~QueueDescriptor() = default;
+
     void ValidateInputsOutputs(const std::string& descName,
                                unsigned int numExpectedIn,
                                unsigned int numExpectedOut) const;
@@ -38,7 +40,6 @@
     }
 
 protected:
-    ~QueueDescriptor() = default;
     QueueDescriptor()
         : m_AdditionalInfoObject(nullptr)
     {}
@@ -52,8 +53,9 @@
 {
     LayerDescriptor m_Parameters;
 
+    virtual ~QueueDescriptorWithParameters() = default;
+
 protected:
-    ~QueueDescriptorWithParameters() = default;
     QueueDescriptorWithParameters() = default;
     QueueDescriptorWithParameters(QueueDescriptorWithParameters const&) = default;
     QueueDescriptorWithParameters& operator=(QueueDescriptorWithParameters const&) = default;
diff --git a/include/armnn/backends/WorkloadFactory.hpp b/include/armnn/backends/WorkloadFactory.hpp
index 68ad2e3..17e56f1 100644
--- a/include/armnn/backends/WorkloadFactory.hpp
+++ b/include/armnn/backends/WorkloadFactory.hpp
@@ -68,212 +68,354 @@
                                                               DataLayout dataLayout,
                                                               const bool IsMemoryManaged = true) const = 0;
 
+    virtual std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+                                                      const QueueDescriptor& descriptor,
+                                                      const WorkloadInfo& info) const;
+
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                         const WorkloadInfo&              info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                       const WorkloadInfo&            info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                        const WorkloadInfo&            info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                             const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                                   const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                         const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                     const WorkloadInfo&          info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(
         const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateDetectionPostProcess(
         const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                               const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
                                                   const WorkloadInfo&        info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                             const WorkloadInfo&                  info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateInstanceNormalization(
         const InstanceNormalizationQueueDescriptor& descriptor,
         const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                            const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                           const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
                                                   const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                             const WorkloadInfo&                  info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                            const WorkloadInfo&                 info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                                  const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                      const WorkloadInfo&           info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                        const WorkloadInfo&           info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreatePooling3d(const Pooling3dQueueDescriptor& descriptor,
                                                        const WorkloadInfo&           info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                       const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                      const WorkloadInfo&           info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                       const WorkloadInfo&            info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
                                                     const WorkloadInfo& Info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateTransposeConvolution2d(
         const TransposeConvolution2dQueueDescriptor& descriptor,
         const WorkloadInfo& info) const;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     virtual std::unique_ptr<IWorkload> CreateUnidirectionalSequenceLstm(
         const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
         const WorkloadInfo& info) const;
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index bc9e4f6..13fa24a 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -21,10 +21,11 @@
 
 std::unique_ptr<IWorkload> AbsLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    AbsQueueDescriptor descriptor;
+    ElementwiseUnaryQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateAbs(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ElementwiseUnary, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 AbsLayer* AbsLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 2cb1607..63c98a9 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -22,7 +22,7 @@
     ActivationQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateActivation(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Activation, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ActivationLayer* ActivationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index c3e98a1..f55bb55e 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -24,7 +24,7 @@
     AdditionQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateAddition(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Addition, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 AdditionLayer* AdditionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index dc69ef6..30db7ba 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -26,7 +26,7 @@
     ArgMinMaxQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ArgMinMax, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index e830b18..18d167f 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -34,7 +34,7 @@
     descriptor.m_Beta = m_Beta.get();
     descriptor.m_Gamma = m_Gamma.get();
 
-    return factory.CreateBatchNormalization(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::BatchNormalization, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index ce602ad..485500d 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -33,7 +33,7 @@
     BatchToSpaceNdQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateBatchToSpaceNd(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::BatchToSpaceNd, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index c5cecb4..03b6865 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -23,7 +23,7 @@
     CastQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateCast(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Cast, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 CastLayer* CastLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index 5f4729f..78a2393 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -24,7 +24,7 @@
     ChannelShuffleQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateChannelShuffle(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ChannelShuffle, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ChannelShuffleLayer* ChannelShuffleLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 47430f1..cf16386 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -25,7 +25,7 @@
     ComparisonQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateComparison(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Comparison, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ComparisonLayer* ComparisonLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 98cb585..b59e0b9 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -33,7 +33,7 @@
     }
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Concat, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 template<typename FactoryType>
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 0ab92cc..0c06dd5 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -24,7 +24,7 @@
     descriptor.m_LayerOutput = m_LayerOutput.get();
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConstant(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Constant, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ConstantLayer* ConstantLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index e589008..6d843f3 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -24,7 +24,7 @@
     ConvertBf16ToFp32QueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConvertBf16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ConvertBf16ToFp32, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 656f59f..cc3c8b1 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -24,7 +24,7 @@
     ConvertFp16ToFp32QueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConvertFp16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ConvertFp16ToFp32, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index b2f4eb1..978fbd1 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -24,7 +24,7 @@
     ConvertFp32ToBf16QueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConvertFp32ToBf16(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ConvertFp32ToBf16, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index f5a8b01..2e1074a 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -23,7 +23,7 @@
     ConvertFp32ToFp16QueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConvertFp32ToFp16(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ConvertFp32ToFp16, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 1c60f17..68e1cb5 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -63,7 +63,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Convolution2d, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
index cc33eae..42b275e 100644
--- a/src/armnn/layers/Convolution3dLayer.cpp
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -50,7 +50,7 @@
     Convolution3dQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateConvolution3d(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Convolution3d, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 Convolution3dLayer* Convolution3dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 96538fe..90a55cb 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -28,7 +28,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateDebug(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Debug, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 DebugLayer* DebugLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 05d691e..033154e 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -30,7 +30,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateDepthToSpace(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::DepthToSpace, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 DepthToSpaceLayer* DepthToSpaceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index b1b95fa..db14e22 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -65,7 +65,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::DepthwiseConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index 84c9010..afa0a73 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -22,7 +22,7 @@
     DequantizeQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Dequantize, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 5a7d888..833ef43 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -26,7 +26,7 @@
     descriptor.m_Anchors = m_Anchors.get();
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateDetectionPostProcess(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::DetectionPostProcess, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index c65e191..c6faf41 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -24,7 +24,7 @@
     DivisionQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateDivision(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Division, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 DivisionLayer* DivisionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index 37d6084..c50910b 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -23,13 +23,7 @@
 std::unique_ptr<IWorkload> ElementwiseUnaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
     ElementwiseUnaryQueueDescriptor descriptor;
-
-    if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
-    {
-        return factory.CreateLogicalUnary(descriptor, PrepInfoAndDesc(descriptor));
-    }
-
-    return factory.CreateElementwiseUnary(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ElementwiseUnary, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 453891f..f375f9a 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -23,7 +23,7 @@
     FakeQuantizationQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateFakeQuantization(descriptor, PrepInfoAndDesc(descriptor) );
+    return factory.CreateWorkload(LayerType::FakeQuantization, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 9fb1bda..5004fab 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -23,7 +23,7 @@
     FillQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateFill(descriptor, PrepInfoAndDesc(descriptor) );
+    return factory.CreateWorkload(LayerType::Fill, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 FillLayer* FillLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 5dbbc28..616c118 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -23,7 +23,7 @@
     FloorQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Floor, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 FloorLayer* FloorLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 261932a..6a9c3b0 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -32,7 +32,7 @@
     }
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::FullyConnected, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index 9b34c12..33d2088 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -23,7 +23,7 @@
     GatherQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateGather(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Gather, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 GatherLayer* GatherLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 6d06c36..44e9870 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -23,7 +23,7 @@
     InstanceNormalizationQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateInstanceNormalization(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::InstanceNormalization, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 7a01b07..0e0ae2e 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -23,7 +23,7 @@
     L2NormalizationQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateL2Normalization(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::L2Normalization, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index e363202..4f51a2b 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -23,7 +23,7 @@
     LogSoftmaxQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateLogSoftmax(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::LogSoftmax, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index cc0ed24..1a20c98 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -23,7 +23,7 @@
 std::unique_ptr<IWorkload> LogicalBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
     LogicalBinaryQueueDescriptor descriptor;
-    return factory.CreateLogicalBinary(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::LogicalBinary, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 LogicalBinaryLayer* LogicalBinaryLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 44da986..46c7574 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -74,7 +74,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateLstm(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Lstm, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 LstmLayer* LstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 077f83f..438c9be 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -23,7 +23,7 @@
     MaximumQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateMaximum(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Maximum, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 MaximumLayer* MaximumLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index b56905f..f695cc3 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -28,7 +28,7 @@
     descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Mean, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 MeanLayer* MeanLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 43715ac..8947041 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -24,7 +24,7 @@
     MinimumQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateMinimum(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Minimum, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 MinimumLayer* MinimumLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 05e266c..36f2689 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -24,7 +24,7 @@
     MultiplicationQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateMultiplication(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Multiplication, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index e7b6de5..e42a7cf 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -23,7 +23,7 @@
     NormalizationQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateNormalization(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Normalization, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 667270a..7900fa5 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -26,7 +26,7 @@
     descriptor.m_Parameters.m_PaddingMode = m_Param.m_PaddingMode;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreatePad(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Pad, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 PadLayer* PadLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 1a4d87b..e20eea6 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -27,7 +27,7 @@
     PermuteQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreatePermute(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Permute, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 PermuteLayer* PermuteLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index ce58012..9fb055b 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -29,7 +29,7 @@
     Pooling2dQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Pooling2d, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Pooling3dLayer.cpp b/src/armnn/layers/Pooling3dLayer.cpp
index 4c083f3..046e146 100644
--- a/src/armnn/layers/Pooling3dLayer.cpp
+++ b/src/armnn/layers/Pooling3dLayer.cpp
@@ -29,7 +29,7 @@
     Pooling3dQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreatePooling3d(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Pooling3d, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 Pooling3dLayer* Pooling3dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 80320e3..ff2fa32 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -34,7 +34,7 @@
     descriptor.m_PreCompiledObject = m_PreCompiledObject.get();
     SetAdditionalInfo(descriptor);
 
-    return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::PreCompiled, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 void PreCompiledLayer::ValidateTensorShapesFromInputs()
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 8d88ed4..431e2f4 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -25,7 +25,7 @@
     PreluQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Prelu, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 PreluLayer* PreluLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index a09aaee..17031fa 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -75,7 +75,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateQLstm(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::QLstm, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 QLstmLayer* QLstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index e37d6f5..55f23bf 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -23,7 +23,7 @@
 
     WorkloadInfo info = PrepInfoAndDesc(descriptor);
 
-    return factory.CreateQuantize(descriptor, info);
+    return factory.CreateWorkload(LayerType::Quantize, descriptor, info);
 }
 
 Layer* QuantizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 6a09241..7fd39f1 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -41,7 +41,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateQuantizedLstm(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::QuantizedLstm, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 17ca691..84d25bf 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -22,7 +22,7 @@
     RankQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateRank(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Rank, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 Layer* RankLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index b03ac72..1f4387b 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -27,7 +27,7 @@
     descriptor.m_Parameters.m_ReduceOperation = m_Param.m_ReduceOperation;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateReduce(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Reduce, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ReduceLayer* ReduceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 571013d..b194f7a 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -24,7 +24,7 @@
     ReshapeQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateReshape(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Reshape, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 9c407c1..89a94f7 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -28,7 +28,7 @@
     ResizeQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Resize, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ResizeLayer* ResizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index adac012..3a63b7c 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -21,10 +21,11 @@
 
 std::unique_ptr<IWorkload> RsqrtLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    RsqrtQueueDescriptor descriptor;
+    ElementwiseUnaryQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateRsqrt(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::ElementwiseUnary, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index 318f38c..ecc112c 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -26,7 +26,7 @@
     ShapeQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateShape(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Shape, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 ShapeLayer* ShapeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index c87cab3..0d61181 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -26,7 +26,7 @@
     SliceQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateSlice(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Slice, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 SliceLayer* SliceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 3a6dfc4..a2930e6 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -23,7 +23,7 @@
     SoftmaxQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateSoftmax(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Softmax, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index e801925..a4c6d1b 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -31,7 +31,7 @@
     descriptor.m_Parameters.m_PadList    = m_Param.m_PadList;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateSpaceToBatchNd(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::SpaceToBatchNd, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 612d940..51d79f4 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -32,7 +32,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateSpaceToDepth(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::SpaceToDepth, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 5e658ce..42cb6e1 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -31,7 +31,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Splitter, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 template<typename FactoryType>
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 09f255b..b842f1b 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -24,7 +24,7 @@
     StackQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Stack, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 StackLayer* StackLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index e80ec22..56051c2 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -36,7 +36,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateStridedSlice(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::StridedSlice, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index c5f9ca9..8e9b173 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -24,7 +24,7 @@
     SubtractionQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateSubtraction(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Subtraction, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 810bd33..afa4d52 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -21,7 +21,7 @@
     SwitchQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateSwitch(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Switch, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 SwitchLayer* SwitchLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 689a3f5..a1f07f9 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -37,7 +37,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateTransposeConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::TransposeConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 3c34df9..3340b9d 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -27,7 +27,7 @@
     TransposeQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateTranspose(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::Transpose, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 TransposeLayer* TransposeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index 911ba2e..c9aaa8c 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -74,7 +74,7 @@
 
     SetAdditionalInfo(descriptor);
 
-    return factory.CreateUnidirectionalSequenceLstm(descriptor, PrepInfoAndDesc(descriptor));
+    return factory.CreateWorkload(LayerType::UnidirectionalSequenceLstm, descriptor, PrepInfoAndDesc(descriptor));
 }
 
 UnidirectionalSequenceLstmLayer* UnidirectionalSequenceLstmLayer::Clone(Graph& graph) const
diff --git a/src/backends/aclCommon/test/MemCopyTestImpl.hpp b/src/backends/aclCommon/test/MemCopyTestImpl.hpp
index db794b9..956ea27 100644
--- a/src/backends/aclCommon/test/MemCopyTestImpl.hpp
+++ b/src/backends/aclCommon/test/MemCopyTestImpl.hpp
@@ -65,7 +65,7 @@
     AddInputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadInput.get());
     AddOutputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadOutput.get());
 
-    dstWorkloadFactory.CreateMemCopy(memCopyQueueDesc, workloadInfo)->Execute();
+    dstWorkloadFactory.CreateWorkload(armnn::LayerType::MemCopy, memCopyQueueDesc, workloadInfo)->Execute();
 
     CopyDataFromITensorHandle(actualOutput.data(), workloadOutput.get());
     ret.m_ActualData = actualOutput;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 9c47a19..56874a6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1538,6 +1538,370 @@
                                          outReasonIfUnsupported,
                                          modelOptions);
 }
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+// Default implementation of the ABI stable CreateWorkload entry point.
+// Dispatches on the LayerType tag: downcasts the base QueueDescriptor to the
+// concrete descriptor type and forwards to the matching (deprecated)
+// Create<Layer>() virtual, so backends that only implement the old
+// per-layer interface keep working unchanged. Unhandled types yield nullptr.
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
+                                                            const QueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
+{
+    switch(type)
+    {
+        case LayerType::Activation:
+        {
+            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+            return CreateActivation(*activationQueueDescriptor, info);
+        }
+        case LayerType::Addition:
+        {
+            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+            return CreateAddition(*additionQueueDescriptor, info);
+        }
+        case LayerType::ArgMinMax:
+        {
+            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+            return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
+        }
+        case LayerType::BatchNormalization:
+        {
+            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+            return CreateBatchNormalization(*batchNormQueueDescriptor, info);
+        }
+        case LayerType::BatchToSpaceNd:
+        {
+            auto batchToSpaceNdQueueDescriptor
+                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+            return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
+        }
+        case LayerType::Cast:
+        {
+            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+            return CreateCast(*castQueueDescriptor, info);
+        }
+        case LayerType::ChannelShuffle:
+        {
+            auto channelShuffleQueueDescriptor
+                    = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+            return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
+        }
+        case LayerType::Comparison:
+        {
+            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+            return CreateComparison(*comparisonQueueDescriptor, info);
+        }
+        case LayerType::Concat:
+        {
+            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+            return CreateConcat(*concatQueueDescriptor, info);
+        }
+        case LayerType::Constant:
+        {
+            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+            return CreateConstant(*constantQueueDescriptor, info);
+        }
+        case LayerType::ConvertBf16ToFp32:
+        {
+            auto convertBf16ToFp32QueueDescriptor
+                    = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
+            return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp16ToFp32:
+        {
+            auto convertFp16ToFp32QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+            return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToBf16:
+        {
+            auto convertFp32ToBf16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
+            return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToFp16:
+        {
+            auto convertFp32ToFp16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+            return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
+        }
+        case LayerType::Convolution2d:
+        {
+            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+            return CreateConvolution2d(*convolution2dQueueDescriptor, info);
+        }
+        case LayerType::Convolution3d:
+        {
+            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+            return CreateConvolution3d(*convolution3dQueueDescriptor, info);
+        }
+        case LayerType::Debug:
+        {
+            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+            return CreateDebug(*debugQueueDescriptor, info);
+        }
+        case LayerType::DepthToSpace:
+        {
+            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+            return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
+        }
+        case LayerType::DepthwiseConvolution2d:
+        {
+            auto depthwiseConvolution2DQueueDescriptor
+                    = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+            return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
+        }
+        case LayerType::Dequantize:
+        {
+            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+            return CreateDequantize(*dequantizeQueueDescriptor, info);
+        }
+        case LayerType::DetectionPostProcess:
+        {
+            auto detectionPostProcessQueueDescriptor
+                    = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+            return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
+        }
+        case LayerType::Division:
+        {
+            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+            return CreateDivision(*divisionQueueDescriptor, info);
+        }
+        case LayerType::ElementwiseUnary:
+        {
+            auto elementwiseUnaryQueueDescriptor
+                    = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+            return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
+        }
+        case LayerType::FakeQuantization:
+        {
+            auto fakeQuantizationQueueDescriptor
+                    = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
+            return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
+        }
+        case LayerType::Fill:
+        {
+            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+            return CreateFill(*fillQueueDescriptor, info);
+        }
+        case LayerType::Floor:
+        {
+            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+            return CreateFloor(*floorQueueDescriptor, info);
+        }
+        case LayerType::FullyConnected:
+        {
+            auto fullyConnectedQueueDescriptor
+                    = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+            return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
+        }
+        case LayerType::Gather:
+        {
+            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+            return CreateGather(*gatherQueueDescriptor, info);
+        }
+        case LayerType::Input:
+        {
+            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+            return CreateInput(*inputQueueDescriptor, info);
+        }
+        case LayerType::InstanceNormalization:
+        {
+            auto instanceNormalizationQueueDescriptor
+                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+            return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
+        }
+        case LayerType::L2Normalization:
+        {
+            auto l2NormalizationQueueDescriptor
+                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+            return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
+        }
+        case LayerType::LogicalBinary:
+        {
+            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+            return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
+        }
+        case LayerType::LogSoftmax:
+        {
+            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+            return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
+        }
+        case LayerType::Lstm:
+        {
+            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+            return CreateLstm(*lstmQueueDescriptor, info);
+        }
+        case LayerType::Maximum:
+        {
+            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+            return CreateMaximum(*maximumQueueDescriptor, info);
+        }
+        case LayerType::Mean:
+        {
+            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+            return CreateMean(*meanQueueDescriptor, info);
+        }
+        case LayerType::MemCopy:
+        {
+            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+            return CreateMemCopy(*memCopyQueueDescriptor, info);
+        }
+        case LayerType::MemImport:
+        {
+            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+            return CreateMemImport(*memImportQueueDescriptor, info);
+        }
+        case LayerType::Minimum:
+        {
+            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+            return CreateMinimum(*minimumQueueDescriptor, info);
+        }
+        case LayerType::Multiplication:
+        {
+            auto multiplicationQueueDescriptor
+                    = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+            return CreateMultiplication(*multiplicationQueueDescriptor, info);
+        }
+        case LayerType::Normalization:
+        {
+            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+            return CreateNormalization(*normalizationQueueDescriptor, info);
+        }
+        case LayerType::Output:
+        {
+            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+            return CreateOutput(*outputQueueDescriptor, info);
+        }
+        case LayerType::Pad:
+        {
+            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+            return CreatePad(*padQueueDescriptor, info);
+        }
+        case LayerType::Permute:
+        {
+            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+            return CreatePermute(*permuteQueueDescriptor, info);
+        }
+        case LayerType::Pooling2d:
+        {
+            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+            return CreatePooling2d(*pooling2dQueueDescriptor, info);
+        }
+        case LayerType::Pooling3d:
+        {
+            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
+            return CreatePooling3d(*pooling3dQueueDescriptor, info);
+        }
+        case LayerType::PreCompiled:
+        {
+            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
+            return CreatePreCompiled(*preCompiledQueueDescriptor, info);
+        }
+        case LayerType::Prelu:
+        {
+            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+            return CreatePrelu(*preluQueueDescriptor, info);
+        }
+        case LayerType::QLstm:
+        {
+            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+            return CreateQLstm(*qlstmQueueDescriptor, info);
+        }
+        case LayerType::Quantize:
+        {
+            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+            return CreateQuantize(*quantizeQueueDescriptor, info);
+        }
+        case LayerType::Rank:
+        {
+            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+            return CreateRank(*rankQueueDescriptor, info);
+        }
+        case LayerType::Reduce:
+        {
+            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+            return CreateReduce(*reduceQueueDescriptor, info);
+        }
+        case LayerType::Reshape:
+        {
+            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+            return CreateReshape(*reshapeQueueDescriptor, info);
+        }
+        case LayerType::Resize:
+        {
+            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+            return CreateResize(*resizeQueueDescriptor, info);
+        }
+        case LayerType::Shape:
+        {
+            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
+            return CreateShape(*shapeQueueDescriptor, info);
+        }
+        case LayerType::Slice:
+        {
+            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+            return CreateSlice(*sliceQueueDescriptor, info);
+        }
+        case LayerType::Softmax:
+        {
+            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+            return CreateSoftmax(*softmaxQueueDescriptor, info);
+        }
+        case LayerType::SpaceToBatchNd:
+        {
+            auto spaceToBatchNdQueueDescriptor
+                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+            return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
+        }
+        case LayerType::SpaceToDepth:
+        {
+            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+            return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
+        }
+        case LayerType::Splitter:
+        {
+            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+            return CreateSplitter(*splitterQueueDescriptor, info);
+        }
+        case LayerType::Stack:
+        {
+            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+            return CreateStack(*stackQueueDescriptor, info);
+        }
+        case LayerType::StridedSlice:
+        {
+            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+            return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
+        }
+        case LayerType::Subtraction:
+        {
+            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+            return CreateSubtraction(*subtractionQueueDescriptor, info);
+        }
+        case LayerType::Transpose:
+        {
+            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+            return CreateTranspose(*transposeQueueDescriptor, info);
+        }
+        case LayerType::TransposeConvolution2d:
+        {
+            auto transposeConvolution2dQueueDescriptor
+                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+            return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
+        }
+        case LayerType::UnidirectionalSequenceLstm:
+        {
+            auto unidirectionalSequenceLstmQueueDescriptor
+                    = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
+            return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
+        }
+        default: // Layer type with no corresponding factory method.
+            return nullptr;
+    }
+}
+ARMNN_NO_DEPRECATE_WARN_END
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index 42a506a..00e549c 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -34,6 +34,11 @@
                                                       const bool /*IsMemoryManaged*/) const override
     { return nullptr; }
 
+    std::unique_ptr<IWorkload> CreateWorkload(LayerType /*type*/,
+                                              const QueueDescriptor& /*descriptor*/,
+                                              const WorkloadInfo& /*info*/) const override
+    { return nullptr; }
+
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
                                                 const WorkloadInfo& /*info*/) const override
     { return nullptr; }
@@ -105,19 +110,14 @@
     std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override
     {
-        if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
+        if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
+        {
+            return CreateWorkload(armnn::LayerType::ElementwiseUnary, descriptor, info);
+        }
+        else
         {
             { return nullptr; }
         }
-        else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
-        {
-            { return nullptr; }
-        }
-        else if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
-        {
-            return CreateLogicalUnary(descriptor, info);
-        }
-        { return nullptr; }
     }
 
     std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 1782fa4..6c37d18 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1474,7 +1474,9 @@
     convolution2dQueueDescriptor.m_Weight = weights.get();
 
     // Create a convolution workload with the dummy settings
-    auto workload = referenceWorkloadFactory->CreateConvolution2d(convolution2dQueueDescriptor, workloadInfo);
+    auto workload = referenceWorkloadFactory->CreateWorkload(LayerType::Convolution2d,
+                                                             convolution2dQueueDescriptor,
+                                                             workloadInfo);
     CHECK((workload != nullptr));
     CHECK(workload.get() == PolymorphicDowncast<RefConvolution2dWorkload*>(workload.get()));
 }
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index bc5e335..06f3eb5 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -549,7 +549,7 @@
     { \
         QueueDesc desc; \
         armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
-        return factory->Create##name(desc, info); \
+        return factory->CreateWorkload(armnn::LayerType::name, desc, info); \
     } \
 };
 
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 119f874..c715d28 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -50,7 +50,8 @@
     InputQueueDescriptor invalidData;
     WorkloadInfo invalidInfo;
     //Invalid argument exception is expected, because no inputs and no outputs were defined.
-    CHECK_THROWS_AS(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefWorkloadFactory().CreateWorkload(LayerType::Input, invalidData, invalidInfo),
+                    armnn::InvalidArgumentException);
 }
 
 TEST_CASE("RefPooling2dFloat32Workload_Validate_WrongDimTensor")
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 5768705..ea9973b 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -71,7 +71,8 @@
     descriptor.m_Parameters.m_A = upperBound;
     descriptor.m_Parameters.m_B = lowerBound;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+                                                                                descriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -269,7 +270,8 @@
     AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
     descriptor.m_Parameters = activationDescriptor;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+                                                                                descriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -352,7 +354,8 @@
     data.m_Parameters.m_B = 0.0f;
     data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+                                                                                data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -458,7 +461,8 @@
     descriptor.m_Parameters.m_A = activationParameterA;
     descriptor.m_Parameters.m_B = activationParameterB;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+                                                                                descriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -875,7 +879,8 @@
 
     descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+                                                                                descriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -1278,9 +1283,11 @@
     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+                                                                                data, info);
     ARMNN_ASSERT(workload != nullptr);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Activation,
+                                                                                      refData, refInfo);
     ARMNN_ASSERT(workloadRef != nullptr);
 
     inputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index ce8f74d..56906bc 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -16,7 +16,7 @@
     const armnn::WorkloadInfo& info,
     const armnn::AdditionQueueDescriptor& descriptor)
 {
-    return workloadFactory.CreateAddition(descriptor, info);
+    return workloadFactory.CreateWorkload(armnn::LayerType::Addition, descriptor, info);
 }
 
 LayerTestResult<float,4> AdditionTest(
@@ -231,7 +231,8 @@
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+                                                                                data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -314,7 +315,8 @@
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+                                                                                data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -580,7 +582,9 @@
     AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
 
     // Create the MaxPool
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d,
+                                                                                queueDescriptor,
+                                                                                workloadInfo);
 
     std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());
 
@@ -611,7 +615,8 @@
     AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
     AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
+    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+                                                                                   data, info);
 
     poolingInputHandle->Allocate();
     poolingOutputHandle->Allocate();
@@ -685,8 +690,10 @@
     SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+                                                                                data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Addition,
+                                                                                      refData, refInfo);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index e98708f..88d92d2 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -41,7 +41,8 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ArgMinMax,
+                                                                                descriptor, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index b441405..928918c 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -94,7 +94,8 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, descriptor, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -192,7 +193,8 @@
         },
         qScale, qOffset);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -672,8 +674,10 @@
     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef
+            = refWorkloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, refData, refInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index 23e790f..a5fdfa6 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -64,7 +64,8 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchToSpaceNd,
+                                                                                data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
index 92bce4f..8d60cf1 100644
--- a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
@@ -41,7 +41,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateCast(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Cast, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
index 08a68cc..74b7bc3 100644
--- a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
@@ -35,7 +35,8 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateChannelShuffle(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::ChannelShuffle, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 2640c32..016d278 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -66,7 +66,8 @@
     AddInputToWorkload(qDescriptor, info, inputTensorInfo1, inputHandle1.get());
     AddOutputToWorkload(qDescriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateComparison(qDescriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::Comparison, qDescriptor, info);
 
     inputHandle0->Allocate();
     inputHandle1->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 8fbd3e3..88a392c 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -147,7 +147,9 @@
     AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Permute,
+                                                                         queueDescriptor,
+                                                                         workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -379,7 +381,8 @@
 
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload
+            = workloadFactory.CreateWorkload(LayerType::Concat, queueDescriptor, workloadInfo);
 
     for (auto& inputHandle : inputHandles)
     {
@@ -2069,7 +2072,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2217,7 +2220,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2549,7 +2552,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2687,7 +2690,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2826,7 +2829,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index f7d8415..c9da749 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -108,7 +108,9 @@
     armnn::WorkloadInfo info;
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Constant,
+                                                                                descriptor,
+                                                                                info);
 
     outputHandle->Allocate();
 
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 0982d01..33dfc23 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -344,7 +344,9 @@
     data.m_Parameters.m_DilationX = dilationX;
     data.m_Parameters.m_DilationY = dilationY;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+                                                                                data,
+                                                                                info);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -444,7 +446,9 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+                                                                                data,
+                                                                                info);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -570,7 +574,9 @@
     data.m_Parameters.m_PadBottom      = padSize;
     data.m_Parameters.m_BiasEnabled    = biasEnabled;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+                                                                                data,
+                                                                                info);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -1388,8 +1394,10 @@
     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload    = workloadFactory.CreateConvolution2d(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef
+            = refWorkloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, refData, refInfo);
 
     outputHandleRef->Allocate();
     inputHandleRef->Allocate();
@@ -1756,7 +1764,8 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -1905,7 +1914,8 @@
     data.m_Parameters.m_BiasEnabled = biasEnabled;
     data.m_Parameters.m_DataLayout = layout;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -2100,7 +2110,8 @@
     data.m_Parameters.m_BiasEnabled = biasEnabled;
     data.m_Parameters.m_DataLayout = layout;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -2266,7 +2277,8 @@
     data.m_Parameters.m_DilationX = dilationX;
     data.m_Parameters.m_DilationY = dilationY;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -2989,8 +3001,10 @@
     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateDepthwiseConvolution2d(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef
+            = refWorkloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, refData, refInfo);
 
     outputHandleRef->Allocate();
     inputHandleRef->Allocate();
@@ -3474,7 +3488,9 @@
     AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConvolution2d(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+                                                                         queueDescriptor,
+                                                                         workloadInfo);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
@@ -3740,7 +3756,9 @@
     AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d,
+                                                                         queueDescriptor,
+                                                                         workloadInfo);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index 45cf48b..406fcb4 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -275,7 +275,9 @@
         AddInputToWorkload(data, info, biasDesc, input2Handle.get());
     }
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution3d(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution3d,
+                                                                                data,
+                                                                                info);
     input0Handle->Allocate();
     input1Handle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
index e4ba0b8..49e1ceb 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
@@ -42,7 +42,9 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertBf16ToFp32,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
index 74c03d9..d581032 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
@@ -46,7 +46,9 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp16ToFp32(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp16ToFp32,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
index 667fd29..61a55f2 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
@@ -63,7 +63,9 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToBf16(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToBf16,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 13dde06..060e7a2 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -44,7 +44,9 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToFp16,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 2938489..d2cbdd1 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -56,7 +56,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDebug(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Debug,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index ee4f1b3..6476e7b 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -58,7 +58,9 @@
     AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthToSpace(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthToSpace,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index 98bf74f..e8fabea 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -39,7 +39,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDequantize(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Dequantize,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index 52aacad..43e7d15 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -200,7 +200,9 @@
     AddOutputToWorkload(data, info, detectionScoresInfo, outputScoresHandle.get());
     AddOutputToWorkload(data, info, numDetectionInfo, numDetectionHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDetectionPostProcess(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DetectionPostProcess,
+                                                                                data,
+                                                                                info);
 
     boxedHandle->Allocate();
     scoreshandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
index f368834..e355ec6 100644
--- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
@@ -13,7 +13,7 @@
     const armnn::WorkloadInfo& info,
     const armnn::DivisionQueueDescriptor& descriptor)
 {
-    return workloadFactory.CreateDivision(descriptor, info);
+    return workloadFactory.CreateWorkload(armnn::LayerType::Division, descriptor, info);
 }
 
 LayerTestResult<float, 4> DivisionByZeroTest(
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
index a2c88a6..c821e83 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
@@ -10,5 +10,5 @@
     const armnn::WorkloadInfo& info,
     const armnn::ElementwiseUnaryQueueDescriptor& descriptor)
 {
-    return workloadFactory.CreateElementwiseUnary(descriptor, info);
+    return workloadFactory.CreateWorkload(armnn::LayerType::ElementwiseUnary, descriptor, info);
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index 613f8b7..e2fb6fa 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -54,7 +54,9 @@
     armnn::WorkloadInfo refInfo = info;
     SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FakeQuantization,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
index fae2691..1be0e40 100644
--- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
@@ -40,7 +40,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFill(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Fill, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
index 527b0db..fbd9625 100644
--- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
@@ -47,7 +47,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Floor, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 9361f4d..71d2d0a 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -69,7 +69,9 @@
         AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
     }
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FullyConnected,
+                                                                                data,
+                                                                                info);
     LayerTestResult<T, 2> result(outputTensorInfo);
 
     input0Handle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index ad81968..c89d53b 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -46,7 +46,7 @@
     AddInputToWorkload(data, info, indicesInfo, indicesHandle.get());
     AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateGather(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Gather, data, info);
 
     paramsHandle->Allocate();
     indicesHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 9de7df7..bebbeda 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -48,7 +48,8 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::InstanceNormalization, descriptor, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index f7566fd..0a60658 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -79,7 +79,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::L2Normalization,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -740,7 +742,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::L2Normalization,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 016d143..cb182d6 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -52,7 +52,9 @@
     AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::LogSoftmax,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
index d2fa2bd..60e14b5 100644
--- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -48,7 +48,7 @@
     AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
 
-    auto workload = workloadFactory.CreateElementwiseUnary(qDesc, info);
+    auto workload = workloadFactory.CreateWorkload(armnn::LayerType::ElementwiseUnary, qDesc, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -104,7 +104,7 @@
     AddInputToWorkload(qDesc, info, inputTensorInfo1, inputHandle1.get());
     AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
 
-    auto workload = workloadFactory.CreateLogicalBinary(qDesc, info);
+    auto workload = workloadFactory.CreateWorkload(armnn::LayerType::LogicalBinary, qDesc, info);
 
     inputHandle0->Allocate();
     inputHandle1->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index c04e97b..a69f727 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -314,7 +314,7 @@
     data.m_Parameters.m_PeepholeEnabled = false;
     data.m_Parameters.m_ProjectionEnabled = false;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -987,7 +987,7 @@
     data.m_Parameters.m_PeepholeEnabled = true;
     data.m_Parameters.m_ProjectionEnabled = true;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1211,7 +1211,7 @@
     AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
 
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
@@ -1464,7 +1464,7 @@
     data.m_Parameters.m_LayerNormEnabled = true;
 
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1653,7 +1653,9 @@
     data.m_OutputGateBias = &outputGateBiasTensor;
 
     // Create workload and allocate tensor handles
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantizedLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QuantizedLstm,
+                                                                                data,
+                                                                                info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1890,7 +1892,7 @@
     data.m_Parameters.m_ProjectionClip = projectionClip;
 
     // Create workload and allocate tensor handles
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -2155,7 +2157,7 @@
     data.m_Parameters.m_ProjectionClip = projectionClip;
 
     // Create workload and allocate tensor handles
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -2406,7 +2408,7 @@
     data.m_Parameters.m_ProjectionClip = projectionClip;
 
     // Create workload and allocate tensor handles
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
index 1ef8f9a..c2c7cd5 100644
--- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
@@ -13,7 +13,7 @@
     const armnn::WorkloadInfo& info,
     const armnn::MaximumQueueDescriptor& descriptor)
 {
-    return workloadFactory.CreateMaximum(descriptor, info);
+    return workloadFactory.CreateWorkload(armnn::LayerType::Maximum, descriptor, info);
 }
 
 LayerTestResult<float, 4> MaximumSimpleTest(
diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
index 29dba2f..eacb4e0 100644
--- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
@@ -55,7 +55,7 @@
     AddInputToWorkload(data,  info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Mean, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
index 015d055..ff31d27 100644
--- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
@@ -13,7 +13,7 @@
     const armnn::WorkloadInfo& info,
     const armnn::MinimumQueueDescriptor& descriptor)
 {
-    return workloadFactory.CreateMinimum(descriptor, info);
+    return workloadFactory.CreateWorkload(armnn::LayerType::Minimum, descriptor, info);
 }
 
 LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
index 2647bb9..d99e5c8 100644
--- a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
@@ -43,7 +43,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -86,7 +88,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -129,7 +133,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
index c7b0821..eab2481 100644
--- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
@@ -14,7 +14,7 @@
     const armnn::WorkloadInfo& info,
     const armnn::MultiplicationQueueDescriptor& descriptor)
 {
-    return workloadFactory.CreateMultiplication(descriptor, info);
+    return workloadFactory.CreateWorkload(armnn::LayerType::Multiplication, descriptor, info);
 }
 
 LayerTestResult<float, 4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
@@ -571,8 +571,10 @@
     SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload    = workloadFactory.CreateMultiplication(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+                = workloadFactory.CreateWorkload(armnn::LayerType::Multiplication, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef
+                = refWorkloadFactory.CreateWorkload(armnn::LayerType::Multiplication, refData, refInfo);
 
     inputHandle0->Allocate();
     inputHandle1->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index 4f2add5..2b2ff0c 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -82,7 +82,9 @@
     armnn::WorkloadInfo refInfo = info;
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -237,7 +239,9 @@
     armnn::WorkloadInfo refInfo = info;
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -355,8 +359,10 @@
         return ret;
     }
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::Normalization, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef
+            = refWorkloadFactory.CreateWorkload(armnn::LayerType::Normalization, refData, refInfo);
 
     outputHandleRef->Allocate();
     inputHandleRef->Allocate();
@@ -438,7 +444,9 @@
     armnn::WorkloadInfo refInfo = info;
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index ab2ef24..b1d8c31 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -72,7 +72,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -157,7 +159,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -397,7 +401,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -470,7 +476,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index fb55f08..bffa3d4 100644
--- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
@@ -39,7 +39,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Permute, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index 248f972..643a5df 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -100,7 +100,9 @@
         return result;
     }
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d,
+                                                                                queueDescriptor,
+                                                                                workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -830,8 +832,10 @@
     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef
+            = refWorkloadFactory.CreateWorkload(armnn::LayerType::Pooling2d, refData, refInfo);
 
     outputHandleRef->Allocate();
     inputHandleRef->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp
index 3befc7c..013ed03 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp
@@ -104,7 +104,9 @@
         return result;
     }
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling3d(queueDescriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling3d,
+                                                                                queueDescriptor,
+                                                                                workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -1035,8 +1037,10 @@
     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling3d(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling3d(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::Pooling3d, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef
+            = refWorkloadFactory.CreateWorkload(armnn::LayerType::Pooling3d, refData, refInfo);
 
     outputHandleRef->Allocate();
     inputHandleRef->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index a74be62..0245245 100644
--- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
@@ -84,7 +84,9 @@
     AddInputToWorkload (descriptor, info, alphaTensorInfo,  alphaHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePrelu(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Prelu,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     alphaHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index 5878ec8..38d6fba 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -40,7 +40,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Quantize,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
index ef45eec..be22fc6 100644
--- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
@@ -35,7 +35,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRank(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Rank, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
index a8b1fda..9506f36 100644
--- a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
@@ -59,7 +59,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
index acb2990..c8a61bb 100644
--- a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
@@ -59,7 +59,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
index 47cb1d7..4edf3dd 100644
--- a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
@@ -61,7 +61,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index ae9280b..217d8e9 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -35,7 +35,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reshape, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index 1773809..2e8cc66 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -123,7 +123,9 @@
     AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Resize,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp
index 5aa3b7c..e30912c 100644
--- a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp
@@ -33,7 +33,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateShape(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Shape, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index 32abf35..95d2320 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -52,7 +52,9 @@
     AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSlice(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Slice,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index e8b4ee5..1956533 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -98,7 +98,7 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Softmax, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -283,8 +283,10 @@
     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Softmax, data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Softmax,
+                                                                                      refData,
+                                                                                      refInfo);
 
     outputHandleRef->Allocate();
     inputHandleRef->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index fe6aa70..103e336 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -69,7 +69,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSpaceToBatchNd(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::SpaceToBatchNd,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index 0080bb6..f67ed9b 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -70,7 +70,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSpaceToDepth(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::SpaceToDepth,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index cfb6a1f..f2ee7bf 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -207,7 +207,9 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle1->Allocate();
@@ -230,7 +232,9 @@
     data2.m_ViewOrigins.push_back(window3);
     data2.m_ViewOrigins.push_back(window4);
 
-    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);
+    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
+                                                                                 data2,
+                                                                                 info2);
 
     outputHandle3->Allocate();
     outputHandle4->Allocate();
@@ -305,7 +309,9 @@
 
     data.m_ViewOrigins.push_back(window);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
index 5250c3a..252adb9 100644
--- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
@@ -65,7 +65,9 @@
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
     outputHandle->Allocate();
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateStack(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Stack,
+                                                                                descriptor,
+                                                                                info);
 
     workload->Execute();
 
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index b4818bb..865b74c 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -54,7 +54,9 @@
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateStridedSlice(descriptor, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::StridedSlice,
+                                                                                descriptor,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
index 3c4946e..61e76ed 100644
--- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
@@ -13,7 +13,7 @@
     const armnn::WorkloadInfo& info,
     const armnn::SubtractionQueueDescriptor& descriptor)
 {
-    return workloadFactory.CreateSubtraction(descriptor, info);
+    return workloadFactory.CreateWorkload(armnn::LayerType::Subtraction, descriptor, info);
 }
 
 LayerTestResult<uint8_t, 4> SubtractionUint8Test(
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 95687e3..41e57de 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -96,7 +96,7 @@
     AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
 
     std::unique_ptr<armnn::IWorkload> workload =
-            workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
+            workloadFactory.CreateWorkload(armnn::LayerType::TransposeConvolution2d, queueDescriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -658,7 +658,9 @@
     AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
 
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::TransposeConvolution2d,
+                                                                         queueDescriptor,
+                                                                         workloadInfo);
     inputHandle->Allocate();
     outputHandle->Allocate();
 
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
index 82bd487..66652f2 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
@@ -39,7 +39,9 @@
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateTranspose(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Transpose,
+                                                                                data,
+                                                                                info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
index e6b50f6..66a26cc 100644
--- a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
@@ -169,7 +169,8 @@
     data.m_Parameters.m_ProjectionEnabled = false;
     data.m_Parameters.m_TimeMajor = false;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -344,7 +345,8 @@
     data.m_Parameters.m_ProjectionEnabled = false;
     data.m_Parameters.m_TimeMajor = true;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -624,7 +626,8 @@
     data.m_Parameters.m_ClippingThresCell = 10.0f;
 
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -848,7 +851,8 @@
     data.m_Parameters.m_TimeMajor = false;
     data.m_Parameters.m_ClippingThresCell = 10.0f;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1008,7 +1012,8 @@
     data.m_Parameters.m_ProjectionEnabled = false;
     data.m_Parameters.m_TimeMajor = false;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1147,7 +1152,8 @@
     data.m_Parameters.m_ProjectionEnabled = false;
     data.m_Parameters.m_TimeMajor = false;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1286,7 +1292,8 @@
     data.m_Parameters.m_ProjectionEnabled = false;
     data.m_Parameters.m_TimeMajor = true;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1449,7 +1456,8 @@
     data.m_Parameters.m_ClippingThresCell = 10.0f;
 
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1640,7 +1648,8 @@
     data.m_Parameters.m_TimeMajor = false;
     data.m_Parameters.m_ClippingThresCell = 10.0f;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
@@ -1776,7 +1785,8 @@
     data.m_Parameters.m_ProjectionEnabled = false;
     data.m_Parameters.m_TimeMajor = false;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
     inputHandle->Allocate();
     outputStateInHandle->Allocate();
     cellStateInHandle->Allocate();
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4bdb84a..0632787 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -244,6 +244,451 @@
         PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
+                                                             const QueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    switch(type)
+    {
+        case LayerType::Activation :
+        {
+            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Addition :
+        {
+            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::ArgMinMax :
+        {
+            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::BatchNormalization :
+        {
+            auto batchNormalizationQueueDescriptor
+                    = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
+                    (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::BatchToSpaceNd :
+        {
+            auto batchToSpaceNdQueueDescriptor
+                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Cast :
+        {
+            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::ChannelShuffle :
+        {
+            auto channelShuffleQueueDescriptor
+                    = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Comparison :
+        {
+            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Concat :
+        {
+            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Constant :
+        {
+            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::ConvertFp16ToFp32 :
+        {
+            auto convertFp16ToFp32QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
+                                                             info,
+                                                             m_CLCompileContext);
+        }
+        case LayerType::ConvertFp32ToFp16 :
+        {
+            auto convertFp32ToFp16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
+                                                             info,
+                                                             m_CLCompileContext);
+        }
+        case LayerType::Convolution2d :
+        {
+            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+
+            bool isFastMathEnabled = false;
+            if (m_ModelContextPtr)
+            {
+                if (m_ModelContextPtr.get() != nullptr)
+                {
+                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
+                    if (modelOptions)
+                    {
+                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
+                    }
+                }
+            }
+            return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
+                                                         info,
+                                                         m_MemoryManager->GetIntraLayerManager(),
+                                                         m_CLCompileContext,
+                                                         isFastMathEnabled);
+        }
+        case LayerType::Convolution3d :
+        {
+            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+
+            bool isFastMathEnabled = false;
+            if (m_ModelContextPtr)
+            {
+                if (m_ModelContextPtr.get() != nullptr)
+                {
+                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
+                    if (modelOptions)
+                    {
+                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
+                    }
+                }
+            }
+            return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
+                                                         info,
+                                                         m_MemoryManager->GetIntraLayerManager(),
+                                                         m_CLCompileContext,
+                                                         isFastMathEnabled);
+        }
+        case LayerType::Debug :
+        {
+            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+            return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::DepthToSpace :
+        {
+            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::DepthwiseConvolution2d :
+        {
+            auto depthwiseConvolution2dQueueDescriptor
+                    = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
+                                                                info,
+                                                                m_CLCompileContext);
+        }
+        case LayerType::Dequantize :
+        {
+            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::DetectionPostProcess :
+        {
+            auto detectionPostProcessQueueDescriptor
+                    = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+            return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
+                                                            info,
+                                                            m_CLCompileContext);
+        }
+        case LayerType::Division :
+        {
+            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+            return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::ElementwiseUnary :
+        {
+            auto elementwiseUnaryQueueDescriptor
+                    = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+
+            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
+            {
+                case UnaryOperation::Abs:
+                {
+                    AbsQueueDescriptor absQueueDescriptor;
+                    absQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
+                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+                    return  std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
+                }
+                case UnaryOperation::Exp:
+                    return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+                case UnaryOperation::Log:
+                    return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+                case UnaryOperation::LogicalNot:
+                    return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
+                                                                  info,
+                                                                  m_CLCompileContext);
+                case UnaryOperation::Neg:
+                    return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+                case UnaryOperation::Rsqrt:
+                {
+                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
+                    rsqrtQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
+                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+                    return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
+                }
+                case UnaryOperation::Sin:
+                    return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+                default:
+                    return nullptr;
+            }
+        }
+        case LayerType::Fill :
+        {
+            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+            return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Floor :
+        {
+            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::FullyConnected :
+        {
+            auto fullyConnectedQueueDescriptor
+                    = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
+                                                          info,
+                                                          m_MemoryManager->GetIntraLayerManager(),
+                                                          m_CLCompileContext);
+        }
+        case LayerType::Gather :
+        {
+            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Input :
+        {
+            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
+        }
+        case LayerType::InstanceNormalization :
+        {
+            auto instanceNormalizationQueueDescriptor
+                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
+                                                                 info,
+                                                                 m_CLCompileContext);
+        }
+        case LayerType::L2Normalization :
+        {
+            auto l2NormalizationQueueDescriptor
+                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
+                                                                              info,
+                                                                              m_CLCompileContext);
+        }
+        case LayerType::LogicalBinary :
+        {
+            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+
+            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
+            {
+                case LogicalBinaryOperation::LogicalAnd:
+                    return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
+                                                                  info,
+                                                                  m_CLCompileContext);
+                case LogicalBinaryOperation::LogicalOr:
+                    return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
+                                                                 info,
+                                                                 m_CLCompileContext);
+                default:
+                    return nullptr;
+            }
+        }
+        case LayerType::LogSoftmax :
+        {
+            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+
+            return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
+                                                      info,
+                                                      m_MemoryManager->GetIntraLayerManager(),
+                                                      m_CLCompileContext);
+        }
+        case LayerType::Lstm :
+        {
+            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Maximum :
+        {
+            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Mean :
+        {
+            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::MemCopy :
+        {
+            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
+            {
+                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
+            }
+            return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
+        }
+        case LayerType::MemImport :
+        {
+            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
+            {
+                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
+            }
+            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
+        }
+        case LayerType::Minimum :
+        {
+            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Multiplication :
+        {
+            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Normalization :
+        {
+            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
+                                                                            info,
+                                                                            m_CLCompileContext);
+        }
+        case LayerType::Output :
+        {
+            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
+        }
+        case LayerType::Pad :
+        {
+            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Permute :
+        {
+            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Pooling2d :
+        {
+            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::PreCompiled :
+        {
+            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
+            return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Prelu :
+        {
+            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::QLstm :
+        {
+            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Quantize :
+        {
+            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::QuantizedLstm :
+        {
+            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Rank :
+        {
+            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+            return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
+        }
+        case LayerType::Reduce :
+        {
+            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+            return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
+        }
+        case LayerType::Reshape :
+        {
+            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Resize :
+        {
+            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Slice :
+        {
+            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Softmax :
+        {
+            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
+                                                       info,
+                                                       m_MemoryManager->GetIntraLayerManager(),
+                                                       m_CLCompileContext);
+        }
+        case LayerType::SpaceToBatchNd :
+        {
+            auto spaceToBatchNdQueueDescriptor
+                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::SpaceToDepth :
+        {
+            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Splitter :
+        {
+            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Stack :
+        {
+            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::StridedSlice :
+        {
+            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Subtraction :
+        {
+            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::Transpose :
+        {
+            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
+        }
+        case LayerType::TransposeConvolution2d :
+        {
+            auto transposeConvolution2dQueueDescriptor
+                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+            return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
+                                                                  info,
+                                                                  m_MemoryManager->GetIntraLayerManager(),
+                                                                  m_CLCompileContext);
+        }
+        default:
+            return nullptr;
+    }
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
 {
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 047f385..91ce711 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -55,186 +55,312 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
+    std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+                                              const QueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
+
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
                                           const WorkloadInfo& Info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 43387d8..3d27390 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -99,7 +99,7 @@
     // for each channel:
     // substract mean, divide by standard deviation (with an epsilon to avoid div by 0)
     // multiply by gamma and add beta
-    std::unique_ptr<IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::BatchNormalization, data, info);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 2d0b890..19d322b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -131,6 +131,434 @@
     return tensorHandle;
 }
 
+/// ABI-stable workload creation entry point. Dispatches on the given LayerType,
+/// downcasts the generic QueueDescriptor to the layer-specific descriptor and
+/// constructs the matching Neon workload. Returns nullptr for unsupported types.
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
+                                                               const QueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
+{
+    switch(type)
+    {
+        case LayerType::Activation :
+        {
+            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
+        }
+        case LayerType::Addition :
+        {
+            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
+        }
+        case LayerType::ArgMinMax :
+        {
+            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
+        }
+        case LayerType::BatchNormalization :
+        {
+            auto batchNormalizationQueueDescriptor
+                    = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
+        }
+        case LayerType::BatchToSpaceNd :
+        {
+            auto batchToSpaceNdQueueDescriptor
+                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
+        }
+        case LayerType::Cast :
+        {
+            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
+        }
+        case LayerType::ChannelShuffle :
+        {
+            auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
+        }
+        case LayerType::Comparison :
+        {
+            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
+        }
+        case LayerType::Concat :
+        {
+            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
+        }
+        case LayerType::Constant :
+        {
+            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
+        }
+        case LayerType::ConvertBf16ToFp32 :
+        {
+            auto convertBf16ToFp32QueueDescriptor
+                    = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp16ToFp32 :
+        {
+            auto convertFp16ToFp32QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToBf16 :
+        {
+            auto convertFp32ToBf16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToFp16 :
+        {
+            auto convertFp32ToFp16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
+        }
+        case LayerType::Convolution2d :
+        {
+            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+
+            // Read the fast-math flag from the backend model options, if any were supplied.
+            bool isFastMathEnabled = false;
+            if (m_ModelContextPtr)
+            {
+                auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
+                if (modelOptions)
+                {
+                    isFastMathEnabled = modelOptions->IsFastMathEnabled();
+                }
+            }
+            return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
+                                                               info,
+                                                               m_MemoryManager->GetIntraLayerManager(),
+                                                               isFastMathEnabled);
+        }
+        case LayerType::Convolution3d :
+        {
+            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+
+            // Read the fast-math flag from the backend model options, if any were supplied.
+            bool isFastMathEnabled = false;
+            if (m_ModelContextPtr)
+            {
+                auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
+                if (modelOptions)
+                {
+                    isFastMathEnabled = modelOptions->IsFastMathEnabled();
+                }
+            }
+            return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
+                                                               info,
+                                                               m_MemoryManager->GetIntraLayerManager(),
+                                                               isFastMathEnabled);
+        }
+        case LayerType::Debug :
+        {
+            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
+        }
+        case LayerType::DepthToSpace :
+        {
+            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
+        }
+        case LayerType::DepthwiseConvolution2d :
+        {
+            auto depthwiseConvolution2dQueueDescriptor
+                    = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
+        }
+        case LayerType::Dequantize :
+        {
+            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
+        }
+        case LayerType::DetectionPostProcess :
+        {
+            auto detectionPostProcessQueueDescriptor
+                    = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
+        }
+        case LayerType::Division :
+        {
+            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
+        }
+        case LayerType::ElementwiseUnary :
+        {
+            auto elementwiseUnaryQueueDescriptor
+                    = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+
+            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
+            {
+                case UnaryOperation::Abs:
+                {
+                    AbsQueueDescriptor absQueueDescriptor;
+                    absQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
+                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+                    return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
+                }
+                case UnaryOperation::Exp:
+                    return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
+                case UnaryOperation::LogicalNot:
+                    return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
+                case UnaryOperation::Log:
+                    return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
+                case UnaryOperation::Neg:
+                    return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
+                case UnaryOperation::Rsqrt:
+                {
+                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
+                    rsqrtQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
+                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+                    return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
+                }
+                case UnaryOperation::Sin:
+                    return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
+                default:
+                    return nullptr;
+            }
+        }
+        case LayerType::Fill :
+        {
+            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
+        }
+        case LayerType::Floor :
+        {
+            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
+        }
+        case LayerType::FullyConnected :
+        {
+            auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
+                                                                info,
+                                                                m_MemoryManager->GetIntraLayerManager());
+        }
+        case LayerType::Gather :
+        {
+            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
+        }
+        case LayerType::Input :
+        {
+            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
+        }
+        case LayerType::InstanceNormalization :
+        {
+            auto instanceNormalizationQueueDescriptor
+                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
+        }
+        case LayerType::L2Normalization :
+        {
+            auto l2NormalizationQueueDescriptor
+                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
+                    (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
+        }
+        case LayerType::LogSoftmax :
+        {
+            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
+                                                            info,
+                                                            m_MemoryManager->GetIntraLayerManager());
+        }
+        case LayerType::LogicalBinary :
+        {
+            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+
+            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
+            {
+                case LogicalBinaryOperation::LogicalAnd:
+                    return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
+                case LogicalBinaryOperation::LogicalOr:
+                    return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
+                default:
+                    return nullptr;
+            }
+        }
+        case LayerType::Lstm :
+        {
+            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
+        }
+        case LayerType::Maximum :
+        {
+            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
+        }
+        case LayerType::Mean :
+        {
+            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
+        }
+        case LayerType::MemCopy :
+        {
+            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
+            {
+                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
+            }
+            return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
+        }
+        case LayerType::MemImport :
+        {
+            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
+            {
+                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
+            }
+            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
+        }
+        case LayerType::Minimum :
+        {
+            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
+        }
+        case LayerType::Multiplication :
+        {
+            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
+        }
+        case LayerType::Normalization :
+        {
+            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
+                    (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
+        }
+        case LayerType::Output :
+        {
+            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
+        }
+        case LayerType::Pad :
+        {
+            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
+        }
+        case LayerType::Permute :
+        {
+            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
+        }
+        case LayerType::Pooling2d :
+        {
+            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
+        }
+        case LayerType::PreCompiled :
+        {
+            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
+            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
+        }
+        case LayerType::Prelu :
+        {
+            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
+        }
+        case LayerType::QLstm :
+        {
+            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
+        }
+        case LayerType::Quantize :
+        {
+            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
+        }
+        case LayerType::QuantizedLstm :
+        {
+            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
+        }
+        case LayerType::Rank :
+        {
+            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
+        }
+        case LayerType::Reduce :
+        {
+            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
+        }
+        case LayerType::Reshape :
+        {
+            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
+        }
+        case LayerType::Resize :
+        {
+            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
+        }
+        case LayerType::Slice :
+        {
+            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
+        }
+        case LayerType::Softmax :
+        {
+            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
+                                                         info,
+                                                         m_MemoryManager->GetIntraLayerManager());
+        }
+        case LayerType::SpaceToBatchNd :
+        {
+            auto spaceToBatchNdQueueDescriptor
+                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
+        }
+        case LayerType::SpaceToDepth :
+        {
+            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
+        }
+        case LayerType::Splitter :
+        {
+            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
+        }
+        case LayerType::Stack :
+        {
+            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
+        }
+        case LayerType::StridedSlice :
+        {
+            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
+        }
+        case LayerType::Subtraction :
+        {
+            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
+        }
+        case LayerType::Transpose :
+        {
+            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
+        }
+        case LayerType::TransposeConvolution2d :
+        {
+            auto transposeConvolution2dQueueDescriptor
+                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
+                                                                        info,
+                                                                        m_MemoryManager->GetIntraLayerManager());
+        }
+        default:
+            return nullptr;
+    }
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                  const WorkloadInfo&              info) const
 {
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index f44681a..802b9e1 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -52,192 +52,322 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
+    std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+                                              const QueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
+
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& Info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                 const WorkloadInfo& Info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
-    
+
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                       const WorkloadInfo& Info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
                                           const WorkloadInfo& Info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index 6f96df5..7d73df8 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -78,7 +78,8 @@
     descriptor.m_Parameters.m_A = upperBound;
     descriptor.m_Parameters.m_B = lowerBound;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+    std::unique_ptr<armnn::IWorkload> workload
+            = workloadFactory.CreateWorkload(LayerType::Activation, descriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 01e7a3e..9db81fc 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include <Layer.hpp>
@@ -141,6 +141,511 @@
     }
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
+                                                              const QueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    switch(type)
+    {
+        case LayerType::Activation :
+        {
+            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
+        }
+        case LayerType::Addition :
+        {
+            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
+            }
+        }
+        case LayerType::ArgMinMax :
+        {
+            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
+        }
+        case LayerType::BatchNormalization :
+        {
+            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
+        }
+        case LayerType::BatchToSpaceNd :
+        {
+            auto batchToSpaceNdQueueDescriptor
+                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
+        }
+        case LayerType::Cast :
+        {
+            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
+        }
+        case LayerType::ChannelShuffle :
+        {
+            auto channelShuffleQueueDescriptor
+                    = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
+        }
+        case LayerType::Comparison :
+        {
+            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
+        }
+        case LayerType::Concat :
+        {
+            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
+        }
+        case LayerType::Constant :
+        {
+            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
+        }
+        case LayerType::ConvertBf16ToFp32 :
+        {
+            auto convertBf16ToFp32QueueDescriptor
+                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp16ToFp32:
+        {
+            auto convertFp16ToFp32QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToBf16:
+        {
+            auto convertFp32ToBf16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToFp16:
+        {
+            auto convertFp32ToFp16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
+        }
+        case LayerType::Convolution2d:
+        {
+            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
+        }
+        case LayerType::Convolution3d:
+        {
+            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
+        }
+        case LayerType::Debug:
+        {
+            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+            if (IsBFloat16(info))
+            {
+                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsFloat16(info))
+            {
+                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQSymmS16(info))
+            {
+                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQSymmS8(info))
+            {
+                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQAsymmU8(info))
+            {
+                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQAsymmS8(info))
+            {
+                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsSigned32(info))
+            {
+                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
+            }
+
+            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
+        }
+        case LayerType::DepthToSpace:
+        {
+            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
+        }
+        case LayerType::DepthwiseConvolution2d:
+        {
+            auto depthwiseConvolution2DQueueDescriptor
+                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
+        }
+        case LayerType::Dequantize:
+        {
+            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
+        }
+        case LayerType::DetectionPostProcess:
+        {
+            auto detectionPostProcessQueueDescriptor
+                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
+        }
+        case LayerType::Division:
+        {
+            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
+            }
+        }
+        case LayerType::ElementwiseUnary:
+        {
+            auto elementwiseUnaryQueueDescriptor
+                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
+            {
+                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
+            }
+            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
+        }
+        case LayerType::FakeQuantization:
+        {
+            auto fakeQuantizationQueueDescriptor
+                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
+        }
+        case LayerType::Fill:
+        {
+            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
+        }
+        case LayerType::Floor:
+        {
+            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+            if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
+            {
+                return nullptr;
+            }
+            else
+            {
+                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
+            }
+        }
+        case LayerType::FullyConnected:
+        {
+            auto fullyConnectedQueueDescriptor
+                    = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
+        }
+        case LayerType::Gather:
+        {
+            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
+        }
+        case LayerType::Input:
+        {
+            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
+            }
+            if (info.m_OutputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
+            }
+
+            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
+                                               "data input and output differ in byte count.");
+            }
+
+            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
+        }
+        case LayerType::InstanceNormalization:
+        {
+            auto instanceNormalizationQueueDescriptor
+                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
+        }
+        case LayerType::L2Normalization:
+        {
+            auto l2NormalizationQueueDescriptor
+                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
+        }
+        case LayerType::LogicalBinary:
+        {
+            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
+        }
+        case LayerType::LogSoftmax:
+        {
+            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
+        }
+        case LayerType::Lstm:
+        {
+            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
+        }
+        case LayerType::Maximum:
+        {
+            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
+            }
+        }
+        case LayerType::Mean:
+        {
+            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
+        }
+        case LayerType::MemCopy:
+        {
+            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+            if (descriptor.m_Inputs.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
+            }
+            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
+        }
+        case LayerType::MemImport:
+        {
+            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+            if (descriptor.m_Inputs.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
+            }
+            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
+        }
+        case LayerType::Minimum:
+        {
+            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
+            }
+        }
+        case LayerType::Multiplication:
+        {
+            auto multiplicationQueueDescriptor
+                    = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
+            }
+        }
+        case LayerType::Normalization:
+        {
+            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
+        }
+        case LayerType::Output:
+        {
+            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
+            }
+            if (info.m_OutputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
+            }
+            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
+                                               "differ in byte count.");
+            }
+
+            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
+        }
+        case LayerType::Pad:
+        {
+            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
+        }
+        case LayerType::Permute:
+        {
+            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+            if (IsQSymmS16(info))
+            {
+                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
+            }
+            else if (IsBFloat16(info))
+            {
+                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
+            }
+            else if (IsQAsymmS8(info))
+            {
+                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
+            }
+            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
+                    NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
+        }
+        case LayerType::Pooling2d:
+        {
+            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
+        }
+        case LayerType::Pooling3d:
+        {
+            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
+        }
+        case LayerType::PreCompiled:
+        {
+            return nullptr;
+        }
+        case LayerType::Prelu:
+        {
+            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
+        }
+        case LayerType::QLstm:
+        {
+            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
+        }
+        case LayerType::Quantize:
+        {
+            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
+        }
+        case LayerType::Rank:
+        {
+            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
+        }
+        case LayerType::Reduce:
+        {
+            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
+        }
+        case LayerType::Reshape:
+        {
+            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
+        }
+        case LayerType::Resize:
+        {
+            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
+        }
+        case LayerType::Shape:
+        {
+            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
+        }
+        case LayerType::Slice:
+        {
+            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
+        }
+        case LayerType::Softmax:
+        {
+            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
+        }
+        case LayerType::SpaceToBatchNd:
+        {
+            auto spaceToBatchNdQueueDescriptor
+                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
+        }
+        case LayerType::SpaceToDepth:
+        {
+            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
+        }
+        case LayerType::Splitter:
+        {
+            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
+        }
+        case LayerType::Stack:
+        {
+            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
+        }
+        case LayerType::StridedSlice:
+        {
+            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
+        }
+        case LayerType::Subtraction:
+        {
+            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
+            }
+        }
+        case LayerType::Transpose:
+        {
+            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+            if (IsQSymmS16(info))
+            {
+                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
+            }
+            else if (IsBFloat16(info))
+            {
+                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
+            }
+            else if (IsQAsymmS8(info))
+            {
+                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
+            }
+            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
+                    RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
+                    (*transposeQueueDescriptor, info);
+        }
+        case LayerType::TransposeConvolution2d:
+        {
+            auto transposeConvolution2dQueueDescriptor
+                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
+        }
+        case LayerType::UnidirectionalSequenceLstm:
+        {
+            auto unidirectionalSequenceLstmQueueDescriptor
+                    = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
+                                                                           info);
+        }
+        default:
+            return nullptr;
+    }
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
 {
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 3dfd3d8..d9d4371 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -66,201 +66,337 @@
                                                       DataLayout dataLayout,
                                                       const bool IsMemoryManaged = true) const override;
 
+    std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+                                              const QueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
+
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
                                           const WorkloadInfo& Info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePooling3d(const Pooling3dQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                  const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+    "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
     std::unique_ptr<IWorkload> CreateUnidirectionalSequenceLstm(
         const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
         const WorkloadInfo& info) const override;