IVGCVSW-1951 Remove type templating from ClConvolution2dWorkload

Change-Id: Iaa99500bfa8cea846f57636590698f9c8a8c2de0
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index a17997b..e23c70e 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -19,7 +19,7 @@
 #include "workloads/ClBatchNormalizationFloatWorkload.hpp"
 #include "workloads/ClConvertFp16ToFp32Workload.hpp"
 #include "workloads/ClConvertFp32ToFp16Workload.hpp"
-#include "workloads/ClConvolution2dBaseWorkload.hpp"
+#include "workloads/ClConvolution2dWorkload.hpp"
 #include "workloads/ClDepthwiseConvolutionBaseWorkload.hpp"
 #include "workloads/ClDivisionFloatWorkload.hpp"
 #include "workloads/ClL2NormalizationFloatWorkload.hpp"
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index bf17382..2e07445 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -144,8 +144,7 @@
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                          const WorkloadInfo&               info) const
 {
-    return MakeWorkload<ClConvolution2dFloatWorkload, ClConvolution2dUint8Workload>(descriptor, info,
-                                                                              m_MemoryManager.GetIntraLayerManager());
+    return std::make_unique<ClConvolution2dWorkload>(descriptor, info, m_MemoryManager.GetIntraLayerManager());
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 81540cc..c86940c 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -21,9 +21,7 @@
         workloads/ClConstantUint8Workload.cpp \
         workloads/ClConvertFp16ToFp32Workload.cpp \
         workloads/ClConvertFp32ToFp16Workload.cpp \
-        workloads/ClConvolution2dBaseWorkload.cpp \
-        workloads/ClConvolution2dFloatWorkload.cpp \
-        workloads/ClConvolution2dUint8Workload.cpp \
+        workloads/ClConvolution2dWorkload.cpp \
         workloads/ClDepthwiseConvolutionBaseWorkload.cpp \
         workloads/ClDepthwiseConvolutionFloatWorkload.cpp \
         workloads/ClDepthwiseConvolutionUint8Workload.cpp \
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 9c48dc9..5a1653e 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -23,12 +23,8 @@
     ClConvertFp16ToFp32Workload.hpp
     ClConvertFp32ToFp16Workload.cpp
     ClConvertFp32ToFp16Workload.hpp
-    ClConvolution2dBaseWorkload.cpp
-    ClConvolution2dBaseWorkload.hpp
-    ClConvolution2dFloatWorkload.cpp
-    ClConvolution2dFloatWorkload.hpp
-    ClConvolution2dUint8Workload.cpp
-    ClConvolution2dUint8Workload.hpp
+    ClConvolution2dWorkload.cpp
+    ClConvolution2dWorkload.hpp
     ClDepthwiseConvolutionBaseWorkload.cpp
     ClDepthwiseConvolutionBaseWorkload.hpp
     ClDepthwiseConvolutionFloatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClConvolution2dBaseWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dBaseWorkload.cpp
deleted file mode 100644
index 58699a8..0000000
--- a/src/backends/cl/workloads/ClConvolution2dBaseWorkload.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvolution2dBaseWorkload.hpp"
-#include <backends/cl/ClLayerSupport.hpp>
-#include <backends/cl/ClTensorHandle.hpp>
-#include <backends/aclCommon/ArmComputeUtils.hpp>
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
-    const TensorInfo& output,
-    const Convolution2dDescriptor& descriptor,
-    const TensorInfo& weights,
-    const boost::optional<TensorInfo>& biases)
-{
-    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
-
-    arm_compute::TensorInfo aclBiasesInfo;
-    arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
-
-    if (descriptor.m_BiasEnabled)
-    {
-        BOOST_ASSERT(biases.is_initialized());
-
-        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
-        optionalAclBiasesInfo = &aclBiasesInfo;
-    }
-
-    arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
-
-    return arm_compute::CLConvolutionLayer::validate(&aclInputInfo,
-                                                     &aclWeightsInfo,
-                                                     optionalAclBiasesInfo,
-                                                     &aclOutputInfo,
-                                                     layerInfo);
-}
-
-}
diff --git a/src/backends/cl/workloads/ClConvolution2dBaseWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dBaseWorkload.hpp
deleted file mode 100644
index a983dba..0000000
--- a/src/backends/cl/workloads/ClConvolution2dBaseWorkload.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/Tensor.hpp>
-#include <armnn/Descriptors.hpp>
-
-#include <boost/optional.hpp>
-
-#include <arm_compute/core/Error.h>
-
-namespace armnn
-{
-
-arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
-    const TensorInfo& output,
-    const Convolution2dDescriptor& descriptor,
-    const TensorInfo& weights,
-    const boost::optional<TensorInfo>& biases);
-
-} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp
deleted file mode 100644
index 0d70ddb..0000000
--- a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvolution2dFloatWorkload.hpp"
-#include <backends/cl/ClTensorHandle.hpp>
-#include <backends/CpuTensorHandle.hpp>
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-#include <backends/cl/ClLayerSupport.hpp>
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor,
-    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
-    : FloatWorkload<Convolution2dQueueDescriptor>(descriptor, info)
-    , m_ConvolutionLayer(memoryManager)
-{
-
-    // todo: check tensor shapes match.
-    const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
-
-    m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
-    BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
-
-    arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
-                                             m_Data.m_Parameters.m_StrideY,
-                                             m_Data.m_Parameters.m_PadLeft,
-                                             m_Data.m_Parameters.m_PadRight,
-                                             m_Data.m_Parameters.m_PadTop,
-                                             m_Data.m_Parameters.m_PadBottom,
-                                             arm_compute::DimensionRoundingType::FLOOR);
-
-    if (m_Data.m_Parameters.m_BiasEnabled)
-    {
-        m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
-        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
-    }
-
-    m_Data.ValidateInputsOutputs("ClConvolution2dFloat32Workload", 1, 1);
-
-    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
-    input.info()->set_data_layout(aclDataLayout);
-    output.info()->set_data_layout(aclDataLayout);
-
-    m_ConvolutionLayer.configure(&input,
-                                 m_KernelTensor.get(),
-                                 m_BiasTensor.get(),
-                                 &output,
-                                 padStrideInfo);
-
-    InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);
-
-    if (m_BiasTensor)
-    {
-        InitializeArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias);
-    }
-
-    // Force Compute Library to perform the necessary copying and reshaping, after which
-    // delete all the input tensors that will no longer be needed
-    m_ConvolutionLayer.prepare();
-    FreeUnusedTensors();
-}
-
-void ClConvolution2dFloatWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dFloat32Workload_Execute");
-
-    m_ConvolutionLayer.run();
-}
-
-void ClConvolution2dFloatWorkload::FreeUnusedTensors()
-{
-    FreeTensorIfUnused(m_KernelTensor);
-    FreeTensorIfUnused(m_BiasTensor);
-}
-
-} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dFloatWorkload.hpp
deleted file mode 100644
index 1f9710e..0000000
--- a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backends/Workload.hpp>
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class ClConvolution2dFloatWorkload : public FloatWorkload<Convolution2dQueueDescriptor>
-{
-public:
-    ClConvolution2dFloatWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
-                                 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
-    void Execute() const override;
-
-private:
-    mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
-
-    std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
-    std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
-
-    void FreeUnusedTensors();
-};
-
-} //namespace armnn
-
diff --git a/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp b/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp
deleted file mode 100644
index 4f8da34..0000000
--- a/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClConvolution2dUint8Workload.hpp"
-#include <backends/cl/ClTensorHandle.hpp>
-#include <backends/CpuTensorHandle.hpp>
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-#include <backends/cl/ClLayerSupport.hpp>
-
-#include "ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor,
-    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
-    : Uint8Workload<Convolution2dQueueDescriptor>(descriptor, info)
-    , m_ConvolutionLayer(memoryManager)
-{
-    // todo: check tensor shapes match
-    const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
-
-    m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
-    BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
-
-    arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
-                                             m_Data.m_Parameters.m_StrideY,
-                                             m_Data.m_Parameters.m_PadLeft,
-                                             m_Data.m_Parameters.m_PadRight,
-                                             m_Data.m_Parameters.m_PadTop,
-                                             m_Data.m_Parameters.m_PadBottom,
-                                             arm_compute::DimensionRoundingType::FLOOR);
-
-    if (m_Data.m_Parameters.m_BiasEnabled)
-    {
-        m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
-        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
-    }
-
-    m_Data.ValidateInputsOutputs("ClConvolution2dUint8Workload", 1, 1);
-
-    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
-    input.info()->set_data_layout(aclDataLayout);
-    output.info()->set_data_layout(aclDataLayout);
-
-    m_ConvolutionLayer.configure(&input,
-                                 m_KernelTensor.get(),
-                                 m_BiasTensor.get(),
-                                 &output,
-                                 padStrideInfo);
-
-    InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);
-
-    if (m_BiasTensor)
-    {
-        InitializeArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias);
-    }
-
-    // Force Compute Library to perform the necessary copying and reshaping, after which
-    // delete all the input tensors that will no longer be needed
-    m_ConvolutionLayer.prepare();
-    FreeUnusedTensors();
-}
-
-void ClConvolution2dUint8Workload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dUint8Workload_Execute");
-
-    m_ConvolutionLayer.run();
-}
-
-void ClConvolution2dUint8Workload::FreeUnusedTensors()
-{
-    FreeTensorIfUnused(m_KernelTensor);
-    FreeTensorIfUnused(m_BiasTensor);
-}
-
-} //namespace armnn
-
diff --git a/src/backends/cl/workloads/ClConvolution2dUint8Workload.hpp b/src/backends/cl/workloads/ClConvolution2dUint8Workload.hpp
deleted file mode 100644
index 1720ec9..0000000
--- a/src/backends/cl/workloads/ClConvolution2dUint8Workload.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backends/Workload.hpp>
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include <arm_compute/runtime/MemoryManagerOnDemand.h>
-
-#include <memory>
-
-namespace armnn
-{
-
-class ClConvolution2dUint8Workload : public Uint8Workload<Convolution2dQueueDescriptor>
-{
-public:
-    ClConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
-                                 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
-    void Execute() const override;
-
-private:
-    mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
-
-    std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
-    std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
-
-    void FreeUnusedTensors();
-};
-
-} //namespace armnn
-
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
new file mode 100644
index 0000000..521711b
--- /dev/null
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -0,0 +1,118 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClConvolution2dWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <backends/cl/ClLayerSupport.hpp>
+#include <backends/cl/ClTensorHandle.hpp>
+#include <backends/aclCommon/ArmComputeUtils.hpp>
+#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
+#include <backends/CpuTensorHandle.hpp>
+
+#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    const Convolution2dDescriptor& descriptor,
+                                                    const TensorInfo& weights,
+                                                    const boost::optional<TensorInfo>& biases)
+{
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
+
+    arm_compute::TensorInfo aclBiasesInfo;
+    arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
+
+    if (descriptor.m_BiasEnabled)
+    {
+        BOOST_ASSERT(biases.is_initialized());
+
+        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+        optionalAclBiasesInfo = &aclBiasesInfo;
+    }
+
+    arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
+
+    return arm_compute::CLConvolutionLayer::validate(&aclInputInfo,
+                                                     &aclWeightsInfo,
+                                                     optionalAclBiasesInfo,
+                                                     &aclOutputInfo,
+                                                     layerInfo);
+}
+
+ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
+    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+    : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
+    , m_ConvolutionLayer(memoryManager)
+{
+    // todo: check tensor shapes match.
+    const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
+
+    m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
+    BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
+
+    arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
+                                             m_Data.m_Parameters.m_StrideY,
+                                             m_Data.m_Parameters.m_PadLeft,
+                                             m_Data.m_Parameters.m_PadRight,
+                                             m_Data.m_Parameters.m_PadTop,
+                                             m_Data.m_Parameters.m_PadBottom,
+                                             arm_compute::DimensionRoundingType::FLOOR);
+
+    if (m_Data.m_Parameters.m_BiasEnabled)
+    {
+        m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
+        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
+    }
+
+    m_Data.ValidateInputsOutputs("ClConvolution2dWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    m_ConvolutionLayer.configure(&input,
+                                 m_KernelTensor.get(),
+                                 m_BiasTensor.get(),
+                                 &output,
+                                 padStrideInfo);
+
+    InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);
+
+    if (m_BiasTensor)
+    {
+        InitializeArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias);
+    }
+
+    // Force Compute Library to perform the necessary copying and reshaping, after which
+    // delete all the input tensors that will no longer be needed
+    m_ConvolutionLayer.prepare();
+    FreeUnusedTensors();
+}
+
+void ClConvolution2dWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
+
+    m_ConvolutionLayer.run();
+}
+
+void ClConvolution2dWorkload::FreeUnusedTensors()
+{
+    FreeTensorIfUnused(m_KernelTensor);
+    FreeTensorIfUnused(m_BiasTensor);
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
new file mode 100644
index 0000000..14a39f3
--- /dev/null
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Descriptors.hpp>
+
+#include <backends/Workload.hpp>
+
+#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
+#include <arm_compute/runtime/MemoryManagerOnDemand.h>
+
+#include <boost/optional.hpp>
+
+#include <memory>
+
+namespace armnn
+{
+
+arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    const Convolution2dDescriptor& descriptor,
+                                                    const TensorInfo& weights,
+                                                    const boost::optional<TensorInfo>& biases);
+
+class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
+{
+public:
+    ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
+                            std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+    void Execute() const override;
+
+private:
+    mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
+
+    std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
+    std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
+
+    void FreeUnusedTensors();
+};
+
+} //namespace armnn
+
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 439ad2d..0ef8fd3 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -12,8 +12,7 @@
 #include "ClBatchNormalizationFloatWorkload.hpp"
 #include "ClConstantFloatWorkload.hpp"
 #include "ClConstantUint8Workload.hpp"
-#include "ClConvolution2dFloatWorkload.hpp"
-#include "ClConvolution2dUint8Workload.hpp"
+#include "ClConvolution2dWorkload.hpp"
 #include "ClDepthwiseConvolutionFloatWorkload.hpp"
 #include "ClDepthwiseConvolutionUint8Workload.hpp"
 #include "ClDivisionFloatWorkload.hpp"
diff --git a/src/backends/test/CreateWorkloadCl.cpp b/src/backends/test/CreateWorkloadCl.cpp
index fb28ce1..e81f844 100644
--- a/src/backends/test/CreateWorkloadCl.cpp
+++ b/src/backends/test/CreateWorkloadCl.cpp
@@ -199,13 +199,12 @@
     BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
 }
 
-template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
+template <typename armnn::DataType DataType>
 static void ClConvolution2dWorkloadTest()
 {
     Graph graph;
     ClWorkloadFactory factory;
-    auto                workload = CreateConvolution2dWorkloadTest<Convolution2dWorkloadType, DataType>
-                                   (factory, graph);
+    auto workload = CreateConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
@@ -217,22 +216,20 @@
 
 BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatWorkload)
 {
-    ClConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float32>();
+    ClConvolution2dWorkloadTest<armnn::DataType::Float32>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16Workload)
 {
-    ClConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float16>();
+    ClConvolution2dWorkloadTest<armnn::DataType::Float16>();
 }
 
-
-template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
+template <typename armnn::DataType DataType>
 static void ClDirectConvolution2dWorkloadTest()
 {
     Graph graph;
     ClWorkloadFactory factory;
-    auto workload = CreateDirectConvolution2dWorkloadTest<Convolution2dWorkloadType, DataType>(
-            factory, graph);
+    auto workload = CreateDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest).
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
@@ -244,17 +241,17 @@
 
 BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
 {
-    ClDirectConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float32>();
+    ClDirectConvolution2dWorkloadTest<armnn::DataType::Float32>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
 {
-    ClDirectConvolution2dWorkloadTest<ClConvolution2dFloatWorkload, armnn::DataType::Float16>();
+    ClDirectConvolution2dWorkloadTest<armnn::DataType::Float16>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
 {
-    ClDirectConvolution2dWorkloadTest<ClConvolution2dUint8Workload, armnn::DataType::QuantisedAsymm8>();
+    ClDirectConvolution2dWorkloadTest<armnn::DataType::QuantisedAsymm8>();
 }
 
 template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>