IVGCVSW-5157 'Pipe ModelOption through Network::LoadNetwork() to Workload factory'

* Pass ModelOptions to WorkloadFactory
* Updated signatures of CL and NEON Convolution2d workloads; added FastMathEnabled param.


Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I536178be8e4dd4083489e69febadaf0feeba46d2
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 8b0afad..f769422 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -28,16 +28,22 @@
 class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
 {
 public:
-    ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
-                            std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+    ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
+                            const WorkloadInfo& info,
+                            std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
+                            const bool isFastMathEnabled = false);
     void Execute() const override;
 
+    arm_compute::ConvolutionMethod GetConvolutionMethod() const;
+
 private:
     mutable arm_compute::CLConvolutionLayer m_ConvolutionLayer;
 
     std::unique_ptr<arm_compute::CLTensor> m_KernelTensor;
     std::unique_ptr<arm_compute::CLTensor> m_BiasTensor;
 
+    arm_compute::ConvolutionMethod m_ConvolutionMethod;
+
     void FreeUnusedTensors();
 };