IVGCVSW-6641 Stabilize the IWorkloadFactory interface with a unified CreateWorkload strategy

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia941be9bf2c15fe56e49a9b9a2bbe943a8152438
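
This change adds a single CreateWorkload(LayerType, QueueDescriptor, WorkloadInfo) entry point to
RefWorkloadFactory alongside the existing per-layer Create* methods. A minimal usage sketch, assuming
a default-constructed factory and an already-populated descriptor and WorkloadInfo (the setup below is
illustrative and not part of this patch; header paths are assumed from the Arm NN source tree layout):

    // #include <reference/RefWorkloadFactory.hpp>   (path assumed)
    // #include <armnn/backends/WorkloadData.hpp>    (path assumed)
    armnn::RefWorkloadFactory factory;
    armnn::ActivationQueueDescriptor descriptor; // inputs/outputs assumed to be wired up elsewhere
    armnn::WorkloadInfo info;                    // tensor infos assumed to be filled in elsewhere

    // The factory switches on the LayerType and downcasts the descriptor internally.
    std::unique_ptr<armnn::IWorkload> workload =
        factory.CreateWorkload(armnn::LayerType::Activation, descriptor, info);
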
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 01e7a3e..9db81fc 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include <Layer.hpp>
@@ -141,6 +141,511 @@
     }
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
+                                                              const QueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
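+    // Unified dispatch: switch on the LayerType, downcast the generic QueueDescriptor to the
+    // layer-specific descriptor and construct the matching reference workload.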
+    switch(type)
+    {
+        case LayerType::Activation:
+        {
+            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
+        }
+        case LayerType::Addition:
+        {
+            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+
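+            // Elementwise arithmetic workloads are templated on the data type: Signed32 inputs use the
+            // int32_t specialisation, all other data types fall back to the float implementation.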
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
+            }
+        }
+        case LayerType::ArgMinMax:
+        {
+            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
+        }
+        case LayerType::BatchNormalization:
+        {
+            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
+        }
+        case LayerType::BatchToSpaceNd:
+        {
+            auto batchToSpaceNdQueueDescriptor
+                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
+        }
+        case LayerType::Cast:
+        {
+            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
+        }
+        case LayerType::ChannelShuffle:
+        {
+            auto channelShuffleQueueDescriptor
+                    = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
+        }
+        case LayerType::Comparison:
+        {
+            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
+        }
+        case LayerType::Concat:
+        {
+            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
+        }
+        case LayerType::Constant:
+        {
+            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
+        }
+        case LayerType::ConvertBf16ToFp32:
+        {
+            auto convertBf16ToFp32QueueDescriptor
+                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp16ToFp32:
+        {
+            auto convertFp16ToFp32QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToBf16:
+        {
+            auto convertFp32ToBf16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
+        }
+        case LayerType::ConvertFp32ToFp16:
+        {
+            auto convertFp32ToFp16QueueDescriptor
+                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
+        }
+        case LayerType::Convolution2d:
+        {
+            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
+        }
+        case LayerType::Convolution3d:
+        {
+            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
+        }
+        case LayerType::Debug:
+        {
+            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
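+            // Pick the debug workload specialisation matching the tensor data type; Float32 and
+            // QAsymmU8 are handled by the MakeWorkload helper below.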
+            if (IsBFloat16(info))
+            {
+                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsFloat16(info))
+            {
+                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQSymmS16(info))
+            {
+                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQSymmS8(info))
+            {
+                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQAsymmU8(info))
+            {
+                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsQAsymmS8(info))
+            {
+                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
+            }
+            if (IsSigned32(info))
+            {
+                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
+            }
+
+            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
+        }
+        case LayerType::DepthToSpace:
+        {
+            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
+        }
+        case LayerType::DepthwiseConvolution2d:
+        {
+            auto depthwiseConvolution2DQueueDescriptor
+                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
+        }
+        case LayerType::Dequantize:
+        {
+            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
+        }
+        case LayerType::DetectionPostProcess:
+        {
+            auto detectionPostProcessQueueDescriptor
+                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
+        }
+        case LayerType::Division:
+        {
+            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
+            }
+        }
+        case LayerType::ElementwiseUnary:
+        {
+            auto elementwiseUnaryQueueDescriptor
+                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+            if (elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation == UnaryOperation::LogicalNot)
+            {
+                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
+            }
+            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
+        }
+        case LayerType::FakeQuantization:
+        {
+            auto fakeQuantizationQueueDescriptor
+                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
+        }
+        case LayerType::Fill:
+        {
+            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
+        }
+        case LayerType::Floor:
+        {
+            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
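+            // Floor is not supported for quantized types, so no workload is created for them.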
+            if (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
+            {
+                return nullptr;
+            }
+            else
+            {
+                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
+            }
+        }
+        case LayerType::FullyConnected:
+        {
+            auto fullyConnectedQueueDescriptor
+                    = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
+        }
+        case LayerType::Gather:
+        {
+            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
+        }
+        case LayerType::Input:
+        {
+            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
+            }
+            if (info.m_OutputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
+            }
+
+            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
+                                               "data input and output differ in byte count.");
+            }
+
+            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
+        }
+        case LayerType::InstanceNormalization:
+        {
+            auto instanceNormalizationQueueDescriptor
+                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
+        }
+        case LayerType::L2Normalization:
+        {
+            auto l2NormalizationQueueDescriptor
+                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
+        }
+        case LayerType::LogicalBinary:
+        {
+            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
+        }
+        case LayerType::LogSoftmax:
+        {
+            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
+        }
+        case LayerType::Lstm:
+        {
+            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
+        }
+        case LayerType::Maximum:
+        {
+            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
+            }
+        }
+        case LayerType::Mean:
+        {
+            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
+        }
+        case LayerType::MemCopy:
+        {
+            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+            if (descriptor.m_Inputs.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
+            }
+            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
+        }
+        case LayerType::MemImport:
+        {
+            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+            if (descriptor.m_Inputs.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
+            }
+            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
+        }
+        case LayerType::Minimum:
+        {
+            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
+            }
+        }
+        case LayerType::Multiplication:
+        {
+            auto multiplicationQueueDescriptor
+                    = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
+            }
+        }
+        case LayerType::Normalization:
+        {
+            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
+        }
+        case LayerType::Output:
+        {
+            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
+            }
+            if (info.m_OutputTensorInfos.empty())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
+            }
+            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
+            {
+                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
+                                               "differ in byte count.");
+            }
+
+            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
+        }
+        case LayerType::Pad:
+        {
+            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
+        }
+        case LayerType::Permute:
+        {
+            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+            if (IsQSymmS16(info))
+            {
+                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
+            }
+            else if (IsBFloat16(info))
+            {
+                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
+            }
+            else if (IsQAsymmS8(info))
+            {
+                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
+            }
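+            // Float16, Float32 and QAsymmU8 permutes are created by the templated helper; the
+            // remaining data types resolve to NullWorkload.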
+            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
+                    NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
+        }
+        case LayerType::Pooling2d:
+        {
+            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
+        }
+        case LayerType::Pooling3d:
+        {
+            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
+        }
+        case LayerType::PreCompiled:
+        {
+            return nullptr;
+        }
+        case LayerType::Prelu:
+        {
+            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
+        }
+        case LayerType::QLstm:
+        {
+            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
+        }
+        case LayerType::Quantize:
+        {
+            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
+        }
+        case LayerType::Rank:
+        {
+            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
+        }
+        case LayerType::Reduce:
+        {
+            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
+        }
+        case LayerType::Reshape:
+        {
+            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
+        }
+        case LayerType::Resize:
+        {
+            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
+        }
+        case LayerType::Shape:
+        {
+            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
+        }
+        case LayerType::Slice:
+        {
+            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
+        }
+        case LayerType::Softmax:
+        {
+            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
+        }
+        case LayerType::SpaceToBatchNd:
+        {
+            auto spaceToBatchNdQueueDescriptor
+                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
+        }
+        case LayerType::SpaceToDepth:
+        {
+            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
+        }
+        case LayerType::Splitter:
+        {
+            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
+        }
+        case LayerType::Stack:
+        {
+            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
+        }
+        case LayerType::StridedSlice:
+        {
+            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
+        }
+        case LayerType::Subtraction:
+        {
+            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+            {
+                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
+            }
+            else
+            {
+                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
+            }
+        }
+        case LayerType::Transpose:
+        {
+            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+            if (IsQSymmS16(info))
+            {
+                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
+            }
+            else if (IsBFloat16(info))
+            {
+                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
+            }
+            else if (IsQAsymmS8(info))
+            {
+                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
+            }
+            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
+                    RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
+                    (*transposeQueueDescriptor, info);
+        }
+        case LayerType::TransposeConvolution2d:
+        {
+            auto transposeConvolution2dQueueDescriptor
+                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
+        }
+        case LayerType::UnidirectionalSequenceLstm:
+        {
+            auto unidirectionalSequenceLstmQueueDescriptor
+                    = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
+            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
+                                                                           info);
+        }
+        default:
+            return nullptr;
+    }
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
 {