IVGCVSW-5157 'Pipe ModelOptions through Network::LoadNetwork() to Workload factory'

* Pass ModelOptions to WorkloadFactory
* Updated the signatures of the CL and NEON Convolution2d workloads to add a FastMathEnabled param.
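
For context, a minimal caller-side sketch of how fast math would be requested
through ModelOptions so that it reaches the workload factory. This assumes the
public armnn::BackendOptions / armnn::OptimizerOptions API, that
OptimizerOptions exposes an m_ModelOptions member, and that the GpuAcc backend
understands a "FastMathEnabled" option key; none of these names come from this
diff itself:

    // Sketch: requesting fast math through ModelOptions so that it reaches
    // the CL workload factory and, from there, ClConvolution2dWorkload.
    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        // Trivial network: a single input connected to an output.
        INetworkPtr network = INetwork::Create();
        IConnectableLayer* input  = network->AddInputLayer(0);
        IConnectableLayer* output = network->AddOutputLayer(0);
        input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));

        IRuntime::CreationOptions runtimeOptions;
        IRuntimePtr runtime = IRuntime::Create(runtimeOptions);

        // "FastMathEnabled" is the backend option that ultimately becomes the
        // isFastMathEnabled flag passed to the Convolution2d workloads.
        BackendOptions gpuAccOptions("GpuAcc", {{"FastMathEnabled", true}});

        OptimizerOptions optimizerOptions;
        optimizerOptions.m_ModelOptions.push_back(gpuAccOptions);

        IOptimizedNetworkPtr optNet = Optimize(*network,
                                               {"GpuAcc"},
                                               runtime->GetDeviceSpec(),
                                               optimizerOptions);

        NetworkId networkId;
        runtime->LoadNetwork(networkId, std::move(optNet));
        return 0;
    }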


Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I536178be8e4dd4083489e69febadaf0feeba46d2
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 42c9903..7b52f27 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -59,7 +59,9 @@
 }
 
 ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
-    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+                                                 const WorkloadInfo& info,
+                                                 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
+                                                 const bool isFastMathEnabled)
     : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
     , m_ConvolutionLayer(memoryManager)
 {
@@ -95,7 +97,20 @@
                                  &output,
                                  padStrideInfo,
                                  arm_compute::WeightsInfo(),
-                                 aclDilationInfo);
+                                 aclDilationInfo,
+                                 arm_compute::ActivationLayerInfo(),
+                                 isFastMathEnabled);
+
+    m_ConvolutionMethod =
+        m_ConvolutionLayer.get_convolution_method(input.info(),
+                                                  m_KernelTensor->info(),
+                                                  output.info(),
+                                                  padStrideInfo,
+                                                  arm_compute::WeightsInfo(),
+                                                  arm_compute::ActivationLayerInfo(),
+                                                  arm_compute::CLScheduler::get().target(),
+                                                  aclDilationInfo,
+                                                  isFastMathEnabled);
 
     InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);
 
@@ -116,6 +131,11 @@
     RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
 }
 
+arm_compute::ConvolutionMethod ClConvolution2dWorkload::GetConvolutionMethod() const
+{
+    return m_ConvolutionMethod;
+}
+
 void ClConvolution2dWorkload::FreeUnusedTensors()
 {
     FreeTensorIfUnused(m_KernelTensor);
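
On the factory side (not shown in this hunk), the flag would be read from the
backend's ModelOptions and forwarded into the new constructor parameter. A
rough sketch only: ClBackendModelContext, IsFastMathEnabled() and the
MakeWorkload helper are assumptions, not taken from this diff:

    // Sketch: ClWorkloadFactory::CreateConvolution2d forwarding the flag
    // derived from the backend's ModelOptions into the workload constructor.
    std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution2d(
        const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
    {
        bool isFastMathEnabled = false;
        if (m_ModelContextPtr)
        {
            if (auto* modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get()))
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
        return MakeWorkload<ClConvolution2dWorkload>(descriptor,
                                                     info,
                                                     m_MemoryManager->GetIntraLayerManager(),
                                                     isFastMathEnabled);
    }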