IVGCVSW-5156 Introduce ModelOptions to OptimizedNetwork

 * Introduced ModelOptions to IBackendInternal
 * Introduced ModelOptions to Network
 * Added FastMathEnabled parameter to the Conv2d Validate functions in CL and NEON (see the usage sketch below)
 * Added Optimizer tests

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ib54c1e82cb3d89a52756ed499cf91b6a7fdb2063
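
Usage sketch (illustrative only, not part of the patch): the new model option is attached to
OptimizerOptions as a per-backend BackendOptions group and handed to Optimize(). This mirrors the
FastMathEnabledTestOnGpuAcc test added below; the trivial input->output network exists only to
give Optimize() something to work on.

    #include <armnn/ArmNN.hpp>
    #include <vector>

    int main()
    {
        // Build a minimal network: one input connected straight to one output.
        armnn::INetworkPtr net(armnn::INetwork::Create());
        armnn::IConnectableLayer* input  = net->AddInputLayer(0);
        armnn::IConnectableLayer* output = net->AddOutputLayer(0);
        input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input->GetOutputSlot(0).SetTensorInfo(
            armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

        armnn::IRuntime::CreationOptions options;
        armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

        // New in this change: per-backend model options travel through
        // OptimizerOptions::m_ModelOptions into the backend's model context.
        armnn::OptimizerOptions optimizerOptions;
        optimizerOptions.m_ModelOptions.push_back(
            armnn::BackendOptions("GpuAcc", {{"FastMathEnabled", true}}));

        std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
        armnn::IOptimizedNetworkPtr optimizedNet =
            armnn::Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);

        return optimizedNet ? 0 : 1;
    }
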
diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt
index f9f69f7..4b5890a 100644
--- a/src/backends/cl/CMakeLists.txt
+++ b/src/backends/cl/CMakeLists.txt
@@ -10,6 +10,8 @@
         ClBackendContext.cpp
         ClBackendContext.hpp
         ClBackendId.hpp
+        ClBackendModelContext.cpp
+        ClBackendModelContext.hpp
         ClContextControl.cpp
         ClContextControl.hpp
         ClLayerSupport.cpp
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index f9a8993..49636d9 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -5,6 +5,7 @@
 
 #include "ClBackend.hpp"
 #include "ClBackendId.hpp"
+#include "ClBackendModelContext.hpp"
 #include "ClWorkloadFactory.hpp"
 #include "ClBackendContext.hpp"
 #include "ClLayerSupport.hpp"
@@ -69,8 +70,7 @@
     registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(mgr));
 }
 
-IBackendInternal::IBackendContextPtr
-ClBackend::CreateBackendContext(const IRuntime::CreationOptions& options) const
+IBackendInternal::IBackendContextPtr ClBackend::CreateBackendContext(const IRuntime::CreationOptions& options) const
 {
     return IBackendContextPtr{new ClBackendContext{options}};
 }
@@ -86,9 +86,27 @@
     return Optimizations{};
 }
 
+IBackendInternal::IBackendSpecificModelContextPtr ClBackend::CreateBackendSpecificModelContext(
+    const ModelOptions& modelOptions) const
+{
+    return IBackendSpecificModelContextPtr{new ClBackendModelContext{modelOptions}};
+}
+
 IBackendInternal::ILayerSupportSharedPtr ClBackend::GetLayerSupport() const
 {
-    static ILayerSupportSharedPtr layerSupport{new ClLayerSupport};
+    static ILayerSupportSharedPtr layerSupport
+        {
+            new ClLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
+        };
+    return layerSupport;
+}
+
+IBackendInternal::ILayerSupportSharedPtr ClBackend::GetLayerSupport(const ModelOptions& modelOptions) const
+{
+    static ILayerSupportSharedPtr layerSupport
+    {
+        new ClLayerSupport(CreateBackendSpecificModelContext(modelOptions))
+    };
     return layerSupport;
 }
 
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index e85c616..108124c 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -36,8 +36,12 @@
 
     IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
     OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+
+    IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(
+        const ModelOptions& modelOptions) const override;
 };
 
 } // namespace armnn
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index 42f42b3..22a4cea 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -99,7 +99,6 @@
     {
         return value.AsBool();
     }
-
     return defaultValue;
 }
 
@@ -112,22 +111,6 @@
     return defaultValue;
 }
 
-template <typename F>
-void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f)
-{
-    for (auto optionsGroup : options)
-    {
-        if (optionsGroup.GetBackendId() == backend)
-        {
-            for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
-            {
-                const BackendOptions::BackendOption option = optionsGroup.GetOption(i);
-                f(option.GetName(), option.GetValue());
-            }
-        }
-    }
-}
-
 void ConfigureTuner(arm_compute::CLTuner &tuner, TuningLevel level)
 {
     tuner.set_tune_new_kernels(true); // Turn on tuning initially.
diff --git a/src/backends/cl/ClBackendModelContext.cpp b/src/backends/cl/ClBackendModelContext.cpp
new file mode 100644
index 0000000..0ef26b6
--- /dev/null
+++ b/src/backends/cl/ClBackendModelContext.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClBackendModelContext.hpp"
+
+namespace
+{
+
+bool ParseBool(const armnn::BackendOptions::Var& value, bool defaultValue)
+{
+    if (value.IsBool())
+    {
+        return value.AsBool();
+    }
+    return defaultValue;
+}
+
+} // namespace anonymous
+
+namespace armnn
+{
+
+ClBackendModelContext::ClBackendModelContext(const ModelOptions& modelOptions)
+    : m_IsFastMathEnabled(false)
+{
+   if (!modelOptions.empty())
+   {
+       ParseOptions(modelOptions, "GpuAcc", [&](std::string name, const BackendOptions::Var& value)
+       {
+           if (name == "FastMathEnabled")
+           {
+               m_IsFastMathEnabled |= ParseBool(value, false);
+           }
+       });
+   }
+}
+
+bool ClBackendModelContext::IsFastMathEnabled() const
+{
+    return m_IsFastMathEnabled;
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClBackendModelContext.hpp b/src/backends/cl/ClBackendModelContext.hpp
new file mode 100644
index 0000000..59f7f8f
--- /dev/null
+++ b/src/backends/cl/ClBackendModelContext.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/IBackendContext.hpp>
+
+namespace armnn
+{
+
+class ClBackendModelContext : public IBackendModelContext
+{
+public:
+    ClBackendModelContext(const ModelOptions& modelOptions);
+
+    bool IsFastMathEnabled() const;
+
+private:
+    bool m_IsFastMathEnabled;
+};
+
+} // namespace armnn
\ No newline at end of file
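
Reviewer note (illustrative only, not part of the patch): the new model context can also be
exercised directly. ModelOptions is a vector of BackendOptions, so a "GpuAcc" option group with
"FastMathEnabled" set is all the constructor needs to parse:

    #include "ClBackendModelContext.hpp"

    // Hypothetical standalone check, assuming ModelOptions is the
    // std::vector<BackendOptions> alias used elsewhere in armnn.
    void CheckFastMathFlag()
    {
        armnn::ModelOptions modelOptions;
        modelOptions.push_back(armnn::BackendOptions("GpuAcc", {{"FastMathEnabled", true}}));

        armnn::ClBackendModelContext modelContext(modelOptions);
        bool fastMath = modelContext.IsFastMathEnabled(); // parsed to true
        (void)fastMath;
    }
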
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 1af5c91..7c1466e 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -5,14 +5,17 @@
 
 #include "ClLayerSupport.hpp"
 #include "ClBackendId.hpp"
+#include "ClBackendModelContext.hpp"
 
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/BackendRegistry.hpp>
 
 #include <InternalTypes.hpp>
 #include <LayerSupportCommon.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #if defined(ARMCOMPUTECL_ENABLED)
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -155,6 +158,16 @@
 }
 } // anonymous namespace
 
+ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
+    : m_ModelContextPtr(modelContextPtr)
+{
+}
+
+ClLayerSupport::ClLayerSupport()
+    : m_ModelContextPtr(nullptr)
+{
+}
+
 bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
@@ -322,13 +335,29 @@
                                               const Optional<TensorInfo>& biases,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
+    bool isFastMathEnabled = false;
+#if defined(ARMCOMPUTECL_ENABLED)
+    if (m_ModelContextPtr)
+    {
+        if (m_ModelContextPtr.get() != nullptr)
+        {
+            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
+            if (modelOptions)
+            {
+                isFastMathEnabled = modelOptions->IsFastMathEnabled();
+            }
+        }
+    }
+#endif
+
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
                                    output,
                                    descriptor,
                                    weights,
-                                   biases);
+                                   biases,
+                                   isFastMathEnabled);
 }
 
 bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index ed0486e..d7e2553 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -4,6 +4,8 @@
 //
 #pragma once
 
+#include <armnn/backends/IBackendInternal.hpp>
+
 #include <backendsCommon/LayerSupportBase.hpp>
 
 namespace armnn
@@ -12,6 +14,10 @@
 class ClLayerSupport : public LayerSupportBase
 {
 public:
+    explicit ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr);
+    ClLayerSupport();
+    ~ClLayerSupport() {}
+
     ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsAbsSupported(const TensorInfo& input,
                         const TensorInfo& output,
@@ -318,6 +324,9 @@
                               const TransposeDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+private:
+    const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
+
 };
 
 } // namespace armnn
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 269057a..9cbe21e 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -16,6 +16,7 @@
 BACKEND_SOURCES := \
         ClBackend.cpp \
         ClBackendContext.cpp \
+        ClBackendModelContext.cpp \
         ClContextControl.cpp \
         ClLayerSupport.cpp \
         ClRegistryInitializer.cpp \
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index c2a8005..2797080 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -100,4 +100,34 @@
     BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
 }
 
+BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
+{
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    armnn::OptimizerOptions optimizerOptions;
+    armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
+    optimizerOptions.m_ModelOptions.push_back(modelOptions);
+
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
+        *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+
+    BOOST_CHECK(optimizedNet);
+
+    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+
+    BOOST_TEST(modelOptionsOut.size() == 1);
+    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+}
+
 BOOST_AUTO_TEST_SUITE_END();
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 73ec95c..42c9903 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -24,7 +24,8 @@
                                                     const TensorInfo& output,
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
-                                                    const Optional<TensorInfo>& biases)
+                                                    const Optional<TensorInfo>& biases,
+                                                    bool isFastMathEnabled)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -52,7 +53,9 @@
                                                      &aclOutputInfo,
                                                      layerInfo,
                                                      arm_compute::WeightsInfo(),
-                                                     aclDilationInfo);
+                                                     aclDilationInfo,
+                                                     arm_compute::ActivationLayerInfo(),
+                                                     isFastMathEnabled);
 }
 
 ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 6d7e9f3..8b0afad 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -22,7 +22,8 @@
                                                     const TensorInfo& output,
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
-                                                    const Optional<TensorInfo>& biases);
+                                                    const Optional<TensorInfo>& biases,
+                                                    bool isFastMathEnabled = false);
 
 class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
 {