IVGCVSW-5156 Introduce ModelOptions to OptimizedNetwork

 * Introduced ModelOptions to IBackendInternal
 * Introduced ModelOptions to Network
 * Added FastMathEnabled parameter to the Conv2d Validate functions in the CL and NEON backends
 * Added Optimizer tests

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ib54c1e82cb3d89a52756ed499cf91b6a7fdb2063
diff --git a/include/armnn/BackendOptions.hpp b/include/armnn/BackendOptions.hpp
index 44438b2..4aee070 100644
--- a/include/armnn/BackendOptions.hpp
+++ b/include/armnn/BackendOptions.hpp
@@ -14,6 +14,8 @@
 struct BackendOptions;
 using NetworkOptions = std::vector<BackendOptions>;
 
+using ModelOptions = std::vector<BackendOptions>;
+
 /// Struct for the users to pass backend specific options
 struct BackendOptions
 {
@@ -262,4 +264,21 @@
     std::vector<BackendOption> m_Options;
 };
 
+
+template <typename F>
+void ParseOptions(const std::vector<BackendOptions>& options, const BackendId& backend, F f)
+{
+    for (const auto& optionsGroup : options)
+    {
+        if (optionsGroup.GetBackendId() == backend)
+        {
+            for (size_t i = 0; i < optionsGroup.GetOptionCount(); ++i)
+            {
+                const BackendOptions::BackendOption& option = optionsGroup.GetOption(i);
+                f(option.GetName(), option.GetValue());
+            }
+        }
+    }
+}
+
 } //namespace armnn
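
A minimal usage sketch of the ParseOptions helper above, assuming a populated
ModelOptions named modelOptions is in scope; the "GpuAcc" backend id and the
"FastMathEnabled" option name are illustrative, not part of this change:

    // Hypothetical caller: pull a fast-math flag out of the model options.
    bool fastMathEnabled = false;
    armnn::ParseOptions(modelOptions, "GpuAcc",
        [&fastMathEnabled](const std::string& name, const armnn::BackendOptions::Var& value)
        {
            if (name == "FastMathEnabled" && value.IsBool())
            {
                fastMathEnabled = value.AsBool();
            }
        });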
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1d4939e..70ad94f 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -614,14 +614,17 @@
         , m_ReduceFp32ToBf16(false)
         , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
         , m_ImportEnabled(false)
+        , m_ModelOptions()
     {}
 
-    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled)
+    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
+        ModelOptions modelOptions = {})
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
         , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
         , m_ImportEnabled(importEnabled)
+        , m_ModelOptions(modelOptions)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -631,12 +634,13 @@
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
-                     bool importEnabled = false)
+                     bool importEnabled = false, ModelOptions modelOptions = {})
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
         , m_shapeInferenceMethod(shapeInferenceMethod)
         , m_ImportEnabled(importEnabled)
+        , m_ModelOptions(modelOptions)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -658,6 +662,9 @@
 
     // Enable Import
     bool m_ImportEnabled;
+
+    // Backend-specific model options, applied when the network is optimized
+    ModelOptions m_ModelOptions;
 };
 
 /// Create an optimized version of the network
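
A sketch of how a caller might hand model options to the optimizer via the
four-flag constructor added above (the backend id and option name are
illustrative):

    // Hypothetical: request fast math on the GpuAcc backend at optimize time.
    armnn::BackendOptions gpuAccOptions("GpuAcc", {{"FastMathEnabled", true}});
    armnn::OptimizerOptions optimizerOptions(/*reduceFp32ToFp16=*/false,
                                             /*debug=*/false,
                                             /*reduceFp32ToBf16=*/false,
                                             /*importEnabled=*/false,
                                             {gpuAccOptions});

The resulting optimizerOptions can then be passed to armnn::Optimize as usual.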
diff --git a/include/armnn/backends/IBackendContext.hpp b/include/armnn/backends/IBackendContext.hpp
index b12c99f..ae85b63 100644
--- a/include/armnn/backends/IBackendContext.hpp
+++ b/include/armnn/backends/IBackendContext.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include <armnn/BackendOptions.hpp>
 #include <armnn/IRuntime.hpp>
 #include <memory>
 
@@ -29,4 +30,11 @@
 
 using IBackendContextUniquePtr = std::unique_ptr<IBackendContext>;
 
+/// Holds backend-specific options for the lifetime of an optimized network.
+class IBackendModelContext
+{
+public:
+    virtual ~IBackendModelContext() = default;
+};
+
 } // namespace armnn
\ No newline at end of file
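
A backend that consumes model options would derive its context from
IBackendModelContext; a rough sketch, in which the class name, backend id and
option name are all hypothetical:

    #include <armnn/BackendOptions.hpp>
    #include <armnn/backends/IBackendContext.hpp>

    // Hypothetical model context that caches a fast-math flag for one backend.
    class ExampleBackendModelContext : public armnn::IBackendModelContext
    {
    public:
        explicit ExampleBackendModelContext(const armnn::ModelOptions& modelOptions)
        {
            armnn::ParseOptions(modelOptions, "ExampleAcc",
                [this](const std::string& name, const armnn::BackendOptions::Var& value)
                {
                    if (name == "FastMathEnabled" && value.IsBool())
                    {
                        m_FastMathEnabled = value.AsBool();
                    }
                });
        }

        bool IsFastMathEnabled() const { return m_FastMathEnabled; }

    private:
        bool m_FastMathEnabled = false;
    };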
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 6771e7b..ee9cb49 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -86,6 +86,8 @@
     using Optimizations = std::vector<OptimizationPtr>;
     using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
 
+    using IBackendSpecificModelContextPtr = std::shared_ptr<IBackendModelContext>;
+
     using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
     using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>;
 
@@ -125,12 +127,25 @@
     /// The default implementation always returns a default-constructed pointer.
     virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const;
 
+    /// Creates a context object that holds the backend-specific model options;
+    /// backends that consume model options should override this.
+    virtual IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions& modelOptions) const;
+
     /// Create context specifically used for profiling interaction from backends.
     virtual IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
                                                                       IBackendProfilingPtr& backendProfiling);
 
     virtual ILayerSupportSharedPtr GetLayerSupport() const = 0;
 
+    virtual ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& /*modelOptions*/) const
+    {
+        // The default implementation has no backend-specific options to act
+        // on, so it simply defers to the parameterless GetLayerSupport().
+        // Backends that consume model options should override this overload
+        // rather than rely on this fallback.
+        return GetLayerSupport();
+    }
+
     virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const;
 
     bool SupportsTensorAllocatorAPI() const;
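
To wire the two new hooks together, a backend would typically override both
virtuals; a sketch continuing the hypothetical ExampleBackendModelContext
above (ExampleBackend and ExampleLayerSupport are likewise hypothetical):

    // Parse the options once and share the result between layer support
    // queries and workload creation.
    armnn::IBackendInternal::IBackendSpecificModelContextPtr
    ExampleBackend::CreateBackendSpecificModelContext(const armnn::ModelOptions& modelOptions) const
    {
        return std::make_shared<ExampleBackendModelContext>(modelOptions);
    }

    armnn::IBackendInternal::ILayerSupportSharedPtr
    ExampleBackend::GetLayerSupport(const armnn::ModelOptions& modelOptions) const
    {
        // A layer-support object built this way can honour options such as
        // FastMathEnabled when validating layers like Convolution2d.
        return std::make_shared<ExampleLayerSupport>(
            CreateBackendSpecificModelContext(modelOptions));
    }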