IVGCVSW-5482 'Add a ClCompileContext parameter to each ClWorkload Constructor'

* Injected a CLCompileContext object into each CL workload.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I4837dbd3d5b56cf743b3b89c944e3cdf8b11a42a
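
A minimal call-site sketch of the resulting construction pattern follows, assuming the
CL workload factory already has a CLCompileContext to hand (here taken from ACL's
CLKernelLibrary singleton, which is only one possible source); the helper function name
and header paths are illustrative assumptions, not the actual ClWorkloadFactory code.

    #include <memory>
    #include "ClConvertFp32ToFp16Workload.hpp"         // as modified by this patch
    #include <arm_compute/core/CL/CLKernelLibrary.h>   // assumed ACL header location

    namespace armnn
    {
    // Hypothetical helper, not part of this patch: creates the workload via the
    // new three-argument constructor introduced below.
    std::unique_ptr<ClConvertFp32ToFp16Workload> MakeConvertWorkload(
        const ConvertFp32ToFp16QueueDescriptor& descriptor,
        const WorkloadInfo& info)
    {
        // Assumption: the CL kernel library (and its compile context) has already
        // been initialised by the CL backend before any workload is created.
        const arm_compute::CLCompileContext& clCompileContext =
            arm_compute::CLKernelLibrary::get().get_compile_context();

        return std::make_unique<ClConvertFp32ToFp16Workload>(descriptor, info, clCompileContext);
    }
    } // namespace armnn
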
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 3f528a1..a9f1d91 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -15,7 +15,9 @@
 static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
 
 ClConvertFp32ToFp16Workload::ClConvertFp32ToFp16Workload(
-    const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) :
+    const ConvertFp32ToFp16QueueDescriptor& descriptor,
+    const WorkloadInfo& info,
+    const arm_compute::CLCompileContext& clCompileContext) :
     Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>(descriptor, info)
 {
     this->m_Data.ValidateInputsOutputs("ClConvertFp32ToFp16Workload", 1, 1);
@@ -23,7 +25,7 @@
     arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
 
-    m_Layer.configure(&input, &output, g_AclConvertPolicy, 0);
+    m_Layer.configure(clCompileContext, &input, &output, g_AclConvertPolicy, 0);
 }
 
 void ClConvertFp32ToFp16Workload::Execute() const
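
The matching change to the constructor declaration lives in the corresponding header
(ClConvertFp32ToFp16Workload.hpp), which this excerpt does not show; mirroring the
definition above, the declaration would presumably gain the same third parameter
(a sketch, not taken from the patch):

    ClConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                const WorkloadInfo& info,
                                const arm_compute::CLCompileContext& clCompileContext);
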