IVGCVSW-5157 'Pipe ModelOption through Network::LoadNetwork() to Workload factory'

* Pass ModelOptions to WorkloadFactory
* Updated the signatures of the CL and NEON Convolution2d workloads to add a FastMathEnabled parameter.
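
For reference, a rough sketch of how a caller might enable fast math once the
option is plumbed through (not part of this patch; the ModelOptions plumbing,
e.g. OptimizerOptions::m_ModelOptions, is assumed here and may differ at this
commit):

    #include <armnn/ArmNN.hpp>

    // Create a runtime and a network; a real caller would add layers here.
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    armnn::INetworkPtr network = armnn::INetwork::Create();
    // ... add layers to the network ...

    // Backend-specific model option intended for the Convolution2d workloads.
    armnn::BackendOptions cpuAccOptions("CpuAcc", {{"FastMathEnabled", true}});

    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ModelOptions.push_back(cpuAccOptions);  // assumed field name

    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network,
                                                         {armnn::Compute::CpuAcc},
                                                         runtime->GetDeviceSpec(),
                                                         optimizerOptions);
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));

With fast math enabled the NEON workload may select a faster convolution
implementation (e.g. Winograd); the new
NeonConvolution2dWorkload::GetConvolutionMethod() getter exposes the
arm_compute::ConvolutionMethod that was actually chosen, which is useful in
unit tests.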


Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I536178be8e4dd4083489e69febadaf0feeba46d2
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 83f7611..d35b968 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -59,8 +59,10 @@
 }
 
 NeonConvolution2dWorkload::NeonConvolution2dWorkload(
-    const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
-    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+    const Convolution2dQueueDescriptor& descriptor,
+    const WorkloadInfo& info,
+    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
+    const bool isFastMathEnabled)
     : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
 {
     using arm_compute::NEDirectConvolutionLayer;
@@ -97,7 +99,19 @@
                                 &output,
                                 padStrideInfo,
                                 arm_compute::WeightsInfo(),
-                                aclDilationInfo);
+                                aclDilationInfo,
+                                arm_compute::ActivationLayerInfo(),
+                                isFastMathEnabled);
+
+    m_ConvolutionMethod =
+        convolutionLayer->get_convolution_method(input.info(),
+                                                 m_KernelTensor->info(),
+                                                 output.info(),
+                                                 padStrideInfo,
+                                                 arm_compute::WeightsInfo(),
+                                                 aclDilationInfo,
+                                                 arm_compute::ActivationLayerInfo(),
+                                                 isFastMathEnabled);
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
 
@@ -120,6 +134,11 @@
     m_ConvolutionLayer->run();
 }
 
+arm_compute::ConvolutionMethod NeonConvolution2dWorkload::GetConvolutionMethod() const
+{
+    return m_ConvolutionMethod;
+}
+
 void NeonConvolution2dWorkload::FreeUnusedTensors()
 {
     FreeTensorIfUnused(m_KernelTensor);