IVGCVSW-5156 Introduce ModelOptions to OptimizedNetwork

 * Introduced ModelOptions to IBackendInternal
 * Introduced ModelOptions to Network
 * Added an isFastMathEnabled parameter to the Convolution2d Validate functions in CL and NEON
 * Added Optimizer tests for the new FastMathEnabled model option

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ib54c1e82cb3d89a52756ed499cf91b6a7fdb2063
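
For reference, a minimal usage sketch of the new option, mirroring the
FastMathEnabled tests added below (the trivial input->output network and
all local names are illustrative only):

    #include <armnn/ArmNN.hpp>

    armnn::IRuntime::CreationOptions runtimeOptions;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(runtimeOptions));

    armnn::INetworkPtr net(armnn::INetwork::Create());
    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    // Ask the GpuAcc backend to enable fast math for this model only.
    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ModelOptions.push_back(
        armnn::BackendOptions("GpuAcc", {{"FastMathEnabled", true}}));

    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(
        *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
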
diff --git a/include/armnn/BackendOptions.hpp b/include/armnn/BackendOptions.hpp
index 44438b2..4aee070 100644
--- a/include/armnn/BackendOptions.hpp
+++ b/include/armnn/BackendOptions.hpp
@@ -14,6 +14,8 @@
 struct BackendOptions;
 using NetworkOptions = std::vector<BackendOptions>;
 
+using ModelOptions = std::vector<BackendOptions>;
+
 /// Struct for the users to pass backend specific options
 struct BackendOptions
 {
@@ -262,4 +264,21 @@
     std::vector<BackendOption> m_Options;
 };
 
+
+template <typename F>
+void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f)
+{
+    for (const auto& optionsGroup : options)
+    {
+        if (optionsGroup.GetBackendId() == backend)
+        {
+            for (size_t i = 0; i < optionsGroup.GetOptionCount(); ++i)
+            {
+                const BackendOptions::BackendOption option = optionsGroup.GetOption(i);
+                f(option.GetName(), option.GetValue());
+            }
+        }
+    }
+}
+
 } //namespace armnn
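
The ParseOptions helper added above is how the backend model contexts later
in this patch pull named options out of a ModelOptions vector. A sketch of
the call pattern (it mirrors ClBackendModelContext.cpp below; the local
variable is illustrative):

    bool fastMathEnabled = false;
    armnn::ParseOptions(modelOptions, "GpuAcc",
                        [&](const std::string& name, const armnn::BackendOptions::Var& value)
    {
        if (name == "FastMathEnabled" && value.IsBool())
        {
            fastMathEnabled = value.AsBool();
        }
    });
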
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1d4939e..70ad94f 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -614,14 +614,17 @@
         , m_ReduceFp32ToBf16(false)
         , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
         , m_ImportEnabled(false)
+        , m_ModelOptions()
     {}
 
-    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled)
+    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
+        ModelOptions modelOptions = {})
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
         , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
         , m_ImportEnabled(importEnabled)
+        , m_ModelOptions(modelOptions)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -631,12 +634,13 @@
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
-                     bool importEnabled = false)
+                     bool importEnabled = false, ModelOptions modelOptions = {})
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
         , m_shapeInferenceMethod(shapeInferenceMethod)
         , m_ImportEnabled(importEnabled)
+        , m_ModelOptions(modelOptions)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -658,6 +662,9 @@
 
     // Enable Import
     bool m_ImportEnabled;
+
+    // Backend-specific model options
+    ModelOptions m_ModelOptions;
 };
 
 /// Create an optimized version of the network
diff --git a/include/armnn/backends/IBackendContext.hpp b/include/armnn/backends/IBackendContext.hpp
index b12c99f..ae85b63 100644
--- a/include/armnn/backends/IBackendContext.hpp
+++ b/include/armnn/backends/IBackendContext.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include <armnn/BackendOptions.hpp>
 #include <armnn/IRuntime.hpp>
 #include <memory>
 
@@ -29,4 +30,10 @@
 
 using IBackendContextUniquePtr = std::unique_ptr<IBackendContext>;
 
+class IBackendModelContext
+{
+public:
+    virtual ~IBackendModelContext() {}
+};
+
 } // namespace armnn
\ No newline at end of file
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 6771e7b..ee9cb49 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -86,6 +86,8 @@
     using Optimizations = std::vector<OptimizationPtr>;
     using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
 
+    using IBackendSpecificModelContextPtr = std::shared_ptr<IBackendModelContext>;
+
     using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
     using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>;
 
@@ -125,12 +127,23 @@
     /// The default implementation always returns a default-constructed pointer.
     virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const;
 
+    virtual IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions& modelOptions) const;
+
     /// Create context specifically used for profiling interaction from backends.
     virtual IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
                                                                       IBackendProfilingPtr& backendProfiling);
 
     virtual ILayerSupportSharedPtr GetLayerSupport() const = 0;
 
+    /// Overload of GetLayerSupport() that takes the model options into account.
+    /// The default implementation ignores the options and falls back to the
+    /// parameterless GetLayerSupport(); backends that honour ModelOptions
+    /// override this. (Re-dispatching to this overload here would recurse forever.)
+    virtual ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& /*modelOptions*/) const
+    {
+        return GetLayerSupport();
+    }
+
     virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const;
 
     bool SupportsTensorAllocatorAPI() const;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 84997a6..17813a8 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1030,7 +1030,8 @@
     const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork);
     std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());
 
-    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);
+    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph), options.m_ModelOptions),
+                                       &IOptimizedNetwork::Destroy);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
 
@@ -2008,6 +2009,11 @@
 {
 }
 
+OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
+    : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
+{
+}
+
 OptimizedNetwork::~OptimizedNetwork()
 {
 }
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 7136ee4..b09ac45 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -273,12 +273,14 @@
     NetworkOptions m_NetworkOptions;
 
     std::unique_ptr<Graph> m_Graph;
+    ModelOptions m_ModelOptions;
 };
 
 class OptimizedNetwork final : public IOptimizedNetwork
 {
 public:
     OptimizedNetwork(std::unique_ptr<Graph> graph);
+    OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
     ~OptimizedNetwork();
 
     Status PrintGraph() override;
@@ -287,10 +289,12 @@
     profiling::ProfilingGuid GetGuid() const final { return m_Guid; };
 
     Graph& GetGraph() { return *m_Graph; }
+    ModelOptions& GetModelOptions() { return m_ModelOptions; }
 
 private:
     std::unique_ptr<Graph> m_Graph;
     profiling::ProfilingGuid m_Guid;
+    ModelOptions m_ModelOptions;
 };
 
 
diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp
index a9d5a54..1cca61e 100644
--- a/src/backends/backendsCommon/IBackendInternal.cpp
+++ b/src/backends/backendsCommon/IBackendInternal.cpp
@@ -44,6 +44,12 @@
     return IBackendContextPtr{};
 }
 
+IBackendInternal::IBackendSpecificModelContextPtr IBackendInternal::CreateBackendSpecificModelContext(
+    const ModelOptions&) const
+{
+    return IBackendSpecificModelContextPtr{};
+}
+
 IBackendInternal::IBackendProfilingContextPtr IBackendInternal::CreateBackendProfilingContext(
     const IRuntime::CreationOptions&, IBackendProfilingPtr&)
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 09d7c2d..0bafda2 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -39,10 +39,11 @@
 
 } // anonymous namespace
 
-bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
-                                        const IConnectableLayer& connectableLayer,
-                                        Optional<DataType> dataType,
-                                        std::string& outReasonIfUnsupported)
+bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
+                                                     const IConnectableLayer& connectableLayer,
+                                                     Optional<DataType> dataType,
+                                                     std::string& outReasonIfUnsupported,
+                                                     const ModelOptions& modelOptions)
 {
     Optional<std::string&> reason = outReasonIfUnsupported;
     bool result;
@@ -61,7 +62,7 @@
 
     auto backendFactory = backendRegistry.GetFactory(backendId);
     auto backendObject = backendFactory();
-    auto layerSupportObject = backendObject->GetLayerSupport();
+    auto layerSupportObject = backendObject->GetLayerSupport(modelOptions);
 
     switch(layer.GetType())
     {
@@ -1212,12 +1213,34 @@
     return result;
 }
 
+bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
+                                        const IConnectableLayer& connectableLayer,
+                                        Optional<DataType> dataType,
+                                        std::string& outReasonIfUnsupported)
+{
+    return IsLayerConfigurationSupported(backendId, connectableLayer, dataType, outReasonIfUnsupported);
+}
+
 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
     auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
-    return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
+    return IsLayerConfigurationSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
+}
+
+// TODO merge with defaulted modelOptions above
+bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
+                                        Optional<DataType> dataType,
+                                        std::string& outReasonIfUnsupported,
+                                        const ModelOptions& modelOptions)
+{
+    auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
+    return IsLayerConfigurationSupported(layer->GetBackendId(),
+                                         connectableLayer,
+                                         dataType,
+                                         outReasonIfUnsupported,
+                                         modelOptions);
 }
 
 // Default Implementations
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 02503f6..68f9da6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -34,6 +34,11 @@
                                  Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
+    static bool IsLayerSupported(const IConnectableLayer& layer,
+                                 Optional<DataType> dataType,
+                                 std::string& outReasonIfUnsupported,
+                                 const ModelOptions& modelOptions);
+
     virtual bool SupportsSubTensors() const = 0;
 
     ARMNN_DEPRECATED_MSG("Use ITensorHandleFactory::CreateSubTensorHandle instead")
@@ -259,6 +264,13 @@
     virtual std::unique_ptr<IWorkload> CreateTransposeConvolution2d(
         const TransposeConvolution2dQueueDescriptor& descriptor,
         const WorkloadInfo& info) const;
+
+private:
+    static bool IsLayerConfigurationSupported(const BackendId& backendId,
+                                              const IConnectableLayer& connectableLayer,
+                                              Optional<DataType> dataType,
+                                              std::string& outReasonIfUnsupported,
+                                              const ModelOptions& modelOptions = {});
 };
 
 } // namespace armnn
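
The private IsLayerConfigurationSupported declared above carries a defaulted
ModelOptions parameter, so the pre-existing public overloads keep working
while optimizer-side callers thread model options through the new public
overload. A hypothetical call site (the layer must already have a backend
assigned; layer and modelOptions are assumed to be in scope):

    std::string reason;
    bool supported = armnn::IWorkloadFactory::IsLayerSupported(
        *layer, armnn::DataType::Float32, reason, modelOptions);
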
diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt
index f9f69f7..4b5890a 100644
--- a/src/backends/cl/CMakeLists.txt
+++ b/src/backends/cl/CMakeLists.txt
@@ -10,6 +10,8 @@
         ClBackendContext.cpp
         ClBackendContext.hpp
         ClBackendId.hpp
+        ClBackendModelContext.cpp
+        ClBackendModelContext.hpp
         ClContextControl.cpp
         ClContextControl.hpp
         ClLayerSupport.cpp
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index f9a8993..49636d9 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -5,6 +5,7 @@
 
 #include "ClBackend.hpp"
 #include "ClBackendId.hpp"
+#include "ClBackendModelContext.hpp"
 #include "ClWorkloadFactory.hpp"
 #include "ClBackendContext.hpp"
 #include "ClLayerSupport.hpp"
@@ -69,8 +70,7 @@
     registry.RegisterFactory(std::make_unique<ClTensorHandleFactory>(mgr));
 }
 
-IBackendInternal::IBackendContextPtr
-ClBackend::CreateBackendContext(const IRuntime::CreationOptions& options) const
+IBackendInternal::IBackendContextPtr ClBackend::CreateBackendContext(const IRuntime::CreationOptions& options) const
 {
     return IBackendContextPtr{new ClBackendContext{options}};
 }
@@ -86,9 +86,29 @@
     return Optimizations{};
 }
 
+IBackendInternal::IBackendSpecificModelContextPtr ClBackend::CreateBackendSpecificModelContext(
+    const ModelOptions& modelOptions) const
+{
+    return IBackendSpecificModelContextPtr{new ClBackendModelContext{modelOptions}};
+}
+
 IBackendInternal::ILayerSupportSharedPtr ClBackend::GetLayerSupport() const
 {
-    static ILayerSupportSharedPtr layerSupport{new ClLayerSupport};
+    static ILayerSupportSharedPtr layerSupport
+    {
+        new ClLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
+    };
+    return layerSupport;
+}
+
+IBackendInternal::ILayerSupportSharedPtr ClBackend::GetLayerSupport(const ModelOptions& modelOptions) const
+{
+    // Deliberately not a function-local static: a static would latch the first
+    // call's model options and ignore any options passed on subsequent calls.
+    ILayerSupportSharedPtr layerSupport
+    {
+        new ClLayerSupport(CreateBackendSpecificModelContext(modelOptions))
+    };
     return layerSupport;
 }
 
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index e85c616..108124c 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -36,8 +36,12 @@
 
     IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
     OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+
+    IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(
+        const ModelOptions& modelOptions) const override;
 };
 
 } // namespace armnn
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index 42f42b3..22a4cea 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -99,7 +99,6 @@
     {
         return value.AsBool();
     }
-
     return defaultValue;
 }
 
@@ -112,22 +111,6 @@
     return defaultValue;
 }
 
-template <typename F>
-void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f)
-{
-    for (auto optionsGroup : options)
-    {
-        if (optionsGroup.GetBackendId() == backend)
-        {
-            for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
-            {
-                const BackendOptions::BackendOption option = optionsGroup.GetOption(i);
-                f(option.GetName(), option.GetValue());
-            }
-        }
-    }
-}
-
 void ConfigureTuner(arm_compute::CLTuner &tuner, TuningLevel level)
 {
     tuner.set_tune_new_kernels(true); // Turn on tuning initially.
diff --git a/src/backends/cl/ClBackendModelContext.cpp b/src/backends/cl/ClBackendModelContext.cpp
new file mode 100644
index 0000000..0ef26b6
--- /dev/null
+++ b/src/backends/cl/ClBackendModelContext.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClBackendModelContext.hpp"
+
+namespace
+{
+
+bool ParseBool(const armnn::BackendOptions::Var& value, bool defaultValue)
+{
+    if (value.IsBool())
+    {
+        return value.AsBool();
+    }
+    return defaultValue;
+}
+
+} // anonymous namespace
+
+namespace armnn
+{
+
+ClBackendModelContext::ClBackendModelContext(const ModelOptions& modelOptions)
+    : m_IsFastMathEnabled(false)
+{
+    if (!modelOptions.empty())
+    {
+        ParseOptions(modelOptions, "GpuAcc", [&](const std::string& name, const BackendOptions::Var& value)
+        {
+            if (name == "FastMathEnabled")
+            {
+                m_IsFastMathEnabled |= ParseBool(value, false);
+            }
+        });
+    }
+}
+
+bool ClBackendModelContext::IsFastMathEnabled() const
+{
+    return m_IsFastMathEnabled;
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/ClBackendModelContext.hpp b/src/backends/cl/ClBackendModelContext.hpp
new file mode 100644
index 0000000..59f7f8f
--- /dev/null
+++ b/src/backends/cl/ClBackendModelContext.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/IBackendContext.hpp>
+
+namespace armnn
+{
+
+class ClBackendModelContext : public IBackendModelContext
+{
+public:
+    ClBackendModelContext(const ModelOptions& modelOptions);
+
+    bool IsFastMathEnabled() const;
+
+private:
+    bool m_IsFastMathEnabled;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 1af5c91..7c1466e 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -5,14 +5,17 @@
 
 #include "ClLayerSupport.hpp"
 #include "ClBackendId.hpp"
+#include "ClBackendModelContext.hpp"
 
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/BackendRegistry.hpp>
 
 #include <InternalTypes.hpp>
 #include <LayerSupportCommon.hpp>
 
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #if defined(ARMCOMPUTECL_ENABLED)
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -155,6 +158,16 @@
 }
 } // anonymous namespace
 
+ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
+    : m_ModelContextPtr(modelContextPtr)
+{
+}
+
+ClLayerSupport::ClLayerSupport()
+    : m_ModelContextPtr(nullptr)
+{
+}
+
 bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
@@ -322,13 +335,26 @@
                                               const Optional<TensorInfo>& biases,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
+    bool isFastMathEnabled = false;
+#if defined(ARMCOMPUTECL_ENABLED)
+    if (m_ModelContextPtr)
+    {
+        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
+        if (modelOptions)
+        {
+            isFastMathEnabled = modelOptions->IsFastMathEnabled();
+        }
+    }
+#endif
+
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
                                    output,
                                    descriptor,
                                    weights,
-                                   biases);
+                                   biases,
+                                   isFastMathEnabled);
 }
 
 bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index ed0486e..d7e2553 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -4,6 +4,8 @@
 //
 #pragma once
 
+#include <armnn/backends/IBackendInternal.hpp>
+
 #include <backendsCommon/LayerSupportBase.hpp>
 
 namespace armnn
@@ -12,6 +14,10 @@
 class ClLayerSupport : public LayerSupportBase
 {
 public:
+    explicit ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr);
+    ClLayerSupport();
+    ~ClLayerSupport() {}
+
     ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsAbsSupported(const TensorInfo& input,
                         const TensorInfo& output,
@@ -318,6 +324,9 @@
                               const TransposeDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+private:
+    const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
+
 };
 
 } // namespace armnn
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 269057a..9cbe21e 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -16,6 +16,7 @@
 BACKEND_SOURCES := \
         ClBackend.cpp \
         ClBackendContext.cpp \
+        ClBackendModelContext.cpp \
         ClContextControl.cpp \
         ClLayerSupport.cpp \
         ClRegistryInitializer.cpp \
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index c2a8005..2797080 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -100,4 +100,34 @@
     BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
 }
 
+BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
+{
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    armnn::OptimizerOptions optimizerOptions;
+    armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
+    optimizerOptions.m_ModelOptions.push_back(modelOptions);
+
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
+        *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+
+    BOOST_CHECK(optimizedNet);
+
+    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+
+    BOOST_TEST(modelOptionsOut.size() == 1);
+    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+}
+
 BOOST_AUTO_TEST_SUITE_END();
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 73ec95c..42c9903 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -24,7 +24,8 @@
                                                     const TensorInfo& output,
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
-                                                    const Optional<TensorInfo>& biases)
+                                                    const Optional<TensorInfo>& biases,
+                                                    bool isFastMathEnabled)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -52,7 +53,9 @@
                                                      &aclOutputInfo,
                                                      layerInfo,
                                                      arm_compute::WeightsInfo(),
-                                                     aclDilationInfo);
+                                                     aclDilationInfo,
+                                                     arm_compute::ActivationLayerInfo(),
+                                                     isFastMathEnabled);
 }
 
 ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
index 6d7e9f3..8b0afad 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.hpp
@@ -22,7 +22,8 @@
                                                     const TensorInfo& output,
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
-                                                    const Optional<TensorInfo>& biases);
+                                                    const Optional<TensorInfo>& biases,
+                                                    bool isFastMathEnabled = false);
 
 class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
 {
diff --git a/src/backends/neon/CMakeLists.txt b/src/backends/neon/CMakeLists.txt
index 327276c..4654de5 100644
--- a/src/backends/neon/CMakeLists.txt
+++ b/src/backends/neon/CMakeLists.txt
@@ -8,6 +8,8 @@
         NeonBackend.cpp
         NeonBackend.hpp
         NeonBackendId.hpp
+        NeonBackendModelContext.hpp
+        NeonBackendModelContext.cpp
         NeonInterceptorScheduler.hpp
         NeonInterceptorScheduler.cpp
         NeonLayerSupport.cpp
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 01cc6d8..31e08ce 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -5,6 +5,7 @@
 
 #include "NeonBackend.hpp"
 #include "NeonBackendId.hpp"
+#include "NeonBackendModelContext.hpp"
 #include "NeonWorkloadFactory.hpp"
 #include "NeonLayerSupport.hpp"
 #include "NeonTensorHandleFactory.hpp"
@@ -75,9 +76,29 @@
     return Optimizations{};
 }
 
+IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext(
+    const ModelOptions& modelOptions) const
+{
+    return IBackendSpecificModelContextPtr{new NeonBackendModelContext{modelOptions}};
+}
+
 IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport() const
 {
-    static ILayerSupportSharedPtr layerSupport{new NeonLayerSupport};
+    static ILayerSupportSharedPtr layerSupport
+    {
+        new NeonLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
+    };
+    return layerSupport;
+}
+
+IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport(const ModelOptions& modelOptions) const
+{
+    // Deliberately not a function-local static: a static would latch the first
+    // call's model options and ignore any options passed on subsequent calls.
+    ILayerSupportSharedPtr layerSupport
+    {
+        new NeonLayerSupport(CreateBackendSpecificModelContext(modelOptions))
+    };
     return layerSupport;
 }
 
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index ad4ac8d..6458ecc 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -31,12 +31,16 @@
         const IRuntime::CreationOptions&, IBackendProfilingPtr& backendProfiling) override;
     IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+    IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
     OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
 
     std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const override;
 
     void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry) override;
+
+    IBackendInternal::IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(
+        const ModelOptions& modelOptions) const override;
 };
 
 } // namespace armnn
diff --git a/src/backends/neon/NeonBackendModelContext.cpp b/src/backends/neon/NeonBackendModelContext.cpp
new file mode 100644
index 0000000..2be71e5
--- /dev/null
+++ b/src/backends/neon/NeonBackendModelContext.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonBackendModelContext.hpp"
+
+namespace
+{
+
+bool ParseBool(const armnn::BackendOptions::Var& value, bool defaultValue)
+{
+    if (value.IsBool())
+    {
+        return value.AsBool();
+    }
+    return defaultValue;
+}
+
+} // anonymous namespace
+
+namespace armnn
+{
+
+NeonBackendModelContext::NeonBackendModelContext(const ModelOptions& modelOptions)
+    : m_IsFastMathEnabled(false)
+{
+    if (!modelOptions.empty())
+    {
+        ParseOptions(modelOptions, "CpuAcc", [&](const std::string& name, const BackendOptions::Var& value)
+        {
+            if (name == "FastMathEnabled")
+            {
+                m_IsFastMathEnabled |= ParseBool(value, false);
+            }
+        });
+    }
+}
+
+bool NeonBackendModelContext::IsFastMathEnabled() const
+{
+    return m_IsFastMathEnabled;
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/NeonBackendModelContext.hpp b/src/backends/neon/NeonBackendModelContext.hpp
new file mode 100644
index 0000000..938d8af
--- /dev/null
+++ b/src/backends/neon/NeonBackendModelContext.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/backends/IBackendContext.hpp>
+
+namespace armnn
+{
+
+class NeonBackendModelContext : public IBackendModelContext
+{
+public:
+    NeonBackendModelContext(const ModelOptions& modelOptions);
+
+    bool IsFastMathEnabled() const;
+
+private:
+    bool m_IsFastMathEnabled;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 9dc8a01..853a518 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -5,6 +5,7 @@
 
 #include "NeonLayerSupport.hpp"
 #include "NeonBackendId.hpp"
+#include "NeonBackendModelContext.hpp"
 
 #include <armnn/Descriptors.hpp>
 #include <armnn/Exceptions.hpp>
@@ -15,6 +16,7 @@
 #include <InternalTypes.hpp>
 #include <LayerSupportCommon.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #if defined(ARMCOMPUTENEON_ENABLED)
 #include <aclCommon/ArmComputeUtils.hpp>
@@ -125,6 +127,16 @@
 #endif
 } // anonymous namespace
 
+NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
+    : m_ModelContextPtr(modelContextPtr)
+{
+}
+
+NeonLayerSupport::NeonLayerSupport()
+    : m_ModelContextPtr(nullptr)
+{
+}
+
 bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
@@ -311,13 +323,26 @@
                                                 const Optional<TensorInfo>& biases,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
+    bool isFastMathEnabled = false;
+#if defined(ARMCOMPUTENEON_ENABLED)
+    if (m_ModelContextPtr)
+    {
+        auto modelOptions = armnn::PolymorphicDowncast<NeonBackendModelContext*>(m_ModelContextPtr.get());
+        if (modelOptions)
+        {
+            isFastMathEnabled = modelOptions->IsFastMathEnabled();
+        }
+    }
+#endif
+
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
                                    output,
                                    descriptor,
                                    weights,
-                                   biases);
+                                   biases,
+                                   isFastMathEnabled);
 }
 
 bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index bdc905d..d477dcd 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -4,6 +4,8 @@
 //
 #pragma once
 
+#include <armnn/backends/IBackendInternal.hpp>
+
 #include <backendsCommon/LayerSupportBase.hpp>
 
 namespace armnn
@@ -12,6 +14,11 @@
 class NeonLayerSupport : public LayerSupportBase
 {
 public:
+    explicit NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr);
+    NeonLayerSupport();
+
+    ~NeonLayerSupport() {}
+
     ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
     bool IsAbsSupported(const TensorInfo& input,
                         const TensorInfo& output,
@@ -327,6 +334,9 @@
                               const TransposeDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+private:
+    const IBackendInternal::IBackendSpecificModelContextPtr m_ModelContextPtr;
+
 }; // class NeonLayerSupport
 
 } // namespace armnn
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index aeee915..9bd08a1 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -15,6 +15,7 @@
 
 BACKEND_SOURCES := \
         NeonBackend.cpp \
+        NeonBackendModelContext.cpp \
         NeonInterceptorScheduler.cpp \
         NeonLayerSupport.cpp \
         NeonRegistryInitializer.cpp \
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index d552c17..4c27aca 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -70,4 +70,34 @@
     BOOST_CHECK(!optNet);
 }
 
+BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
+{
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    armnn::OptimizerOptions optimizerOptions;
+    armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
+    optimizerOptions.m_ModelOptions.push_back(modelOptions);
+
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
+        *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+
+    BOOST_CHECK(optimizedNet);
+
+    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+
+    BOOST_TEST(modelOptionsOut.size() == 1);
+    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 144baec..83f7611 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -21,10 +21,11 @@
 using namespace armcomputetensorutils;
 
 arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
-    const TensorInfo& output,
-    const Convolution2dDescriptor& descriptor,
-    const TensorInfo& weights,
-    const Optional<TensorInfo>& biases)
+                                                      const TensorInfo& output,
+                                                      const Convolution2dDescriptor& descriptor,
+                                                      const TensorInfo& weights,
+                                                      const Optional<TensorInfo>& biases,
+                                                      bool isFastMathEnabled)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -52,7 +53,9 @@
                                                      &aclOutputInfo,
                                                      layerInfo,
                                                      arm_compute::WeightsInfo(),
-                                                     aclDilationInfo);
+                                                     aclDilationInfo,
+                                                     arm_compute::ActivationLayerInfo(),
+                                                     isFastMathEnabled);
 }
 
 NeonConvolution2dWorkload::NeonConvolution2dWorkload(
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
index 3fb408d..54e08a2 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.hpp
@@ -17,10 +17,11 @@
 {
 
 arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
-    const TensorInfo& output,
-    const Convolution2dDescriptor& descriptor,
-    const TensorInfo& weights,
-    const Optional<TensorInfo>& biases);
+                                                      const TensorInfo& output,
+                                                      const Convolution2dDescriptor& descriptor,
+                                                      const TensorInfo& weights,
+                                                      const Optional<TensorInfo>& biases,
+                                                      bool isFastMathEnabled = false);
 
 class NeonConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
 {