IVGCVSW-5157 'Pipe ModelOption through Network::LoadNetwork() to Workload factory'

* Pass ModelOptions through to the WorkloadFactory
* Update the signatures of the CL and NEON Convolution2d workloads to add a FastMathEnabled parameter (usage sketch below).
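
A minimal, hypothetical usage sketch (not part of this patch) of how a caller
could route FastMathEnabled through ModelOptions so that it reaches the
Convolution2d workloads. It assumes the public BackendOptions / OptimizerOptions
/ IRuntime API; the network-building step is elided and the option name simply
mirrors the FastMathEnabled parameter mentioned above.

    #include <armnn/ArmNN.hpp>
    #include <armnn/BackendOptions.hpp>

    #include <utility>

    int main()
    {
        using namespace armnn;

        IRuntime::CreationOptions runtimeOptions;
        IRuntimePtr runtime = IRuntime::Create(runtimeOptions);

        INetworkPtr network = INetwork::Create();
        // ... add layers (e.g. Convolution2d) and set tensor infos here ...

        // ModelOptions is a vector of BackendOptions; the CpuAcc (NEON)
        // backend reads "FastMathEnabled" from its backend-specific
        // model context.
        BackendOptions cpuAcc("CpuAcc", { { "FastMathEnabled", true } });

        OptimizerOptions optimizerOptions;
        optimizerOptions.m_ModelOptions.push_back(cpuAcc);

        IOptimizedNetworkPtr optNet = Optimize(*network,
                                               { Compute::CpuAcc },
                                               runtime->GetDeviceSpec(),
                                               optimizerOptions);

        // LoadNetwork() pipes the ModelOptions down to the workload
        // factory, which forwards FastMathEnabled to the workloads.
        NetworkId networkId;
        runtime->LoadNetwork(networkId, std::move(optNet));
        return 0;
    }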


Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I536178be8e4dd4083489e69febadaf0feeba46d2
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 09ae839..6a514e2 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <armnn/Optional.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
 
 #include <backendsCommon/WorkloadFactoryBase.hpp>
 #include <aclCommon/BaseMemoryManager.hpp>
@@ -19,12 +20,20 @@
 public:
     NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager);
 
+    NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
+                        const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr);
+
     const BackendId& GetBackendId() const override;
 
     static bool IsLayerSupported(const Layer& layer,
                                  Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
+    static bool IsLayerSupported(const IConnectableLayer& layer,
+                                 Optional<DataType> dataType,
+                                 std::string& outReasonIfUnsupported,
+                                 const ModelOptions& modelOptions);
+
     bool SupportsSubTensors() const override { return true; }
 
     ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
@@ -238,6 +247,7 @@
 
 private:
     mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
+    const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
 };
 
 } // namespace armnn