IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions
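
Hide the OptimizerOptions data members behind an ABI-stable interface,
OptimizerOptionsOpaque, which forwards to a private OptimizerOptionsOpaqueImpl
held through a std::unique_ptr. The existing OptimizerOptions overloads are kept
as deprecated forwarders. Below is a minimal sketch of the pattern as applied
here, for illustration only: the real declarations live in
include/armnn/INetwork.hpp and src/armnn/Network.cpp, and the
GetImportEnabled/SetImportEnabled pair stands in for the full accessor set.

    #include <memory>

    namespace armnn
    {
    // Only a forward declaration is exposed in the public header, so the
    // implementation struct can change without breaking the library ABI.
    struct OptimizerOptionsOpaqueImpl;

    class OptimizerOptionsOpaque
    {
    public:
        OptimizerOptionsOpaque();
        ~OptimizerOptionsOpaque();   // defined in the .cpp where the Impl is complete
        OptimizerOptionsOpaque(const OptimizerOptionsOpaque& other);
        OptimizerOptionsOpaque& operator=(OptimizerOptionsOpaque other);

        // Accessors forward to the Impl; one pair shown here as an example.
        void SetImportEnabled(bool importState);
        bool GetImportEnabled() const;

    private:
        std::unique_ptr<OptimizerOptionsOpaqueImpl> p_OptimizerOptionsImpl;
    };
    } // namespace armnn

Callers migrate from direct member access to the accessors, e.g.
optimizerOptions.m_ImportEnabled = true becomes
optimizerOptions.SetImportEnabled(true).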

Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 4ddfc1a..b494a36 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -335,7 +335,7 @@
     DelegateData delegateData(delegate->m_Options.GetBackends());
 
     // Build ArmNN Network
-    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
+    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
     armnn::NetworkId networkId;
     delegateData.m_Network = armnn::INetwork::Create(networkOptions);
 
@@ -424,11 +424,11 @@
         armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
         armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
         // There's a bit of an assumption here that the delegate will only support Malloc memory source.
-        if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
+        if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
         {
             inputSource = armnn::MemorySource::Malloc;
         }
-        if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
+        if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
         {
             outputSource = armnn::MemorySource::Malloc;
         }
diff --git a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
index 26acfe9..409b769 100644
--- a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
+++ b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
@@ -76,7 +76,7 @@
     // Create the Armnn Delegate
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
 
-    armnn::OptimizerOptions optimizerOptions(true, true, false, true);
+    armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, true);
 
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
diff --git a/delegate/common/include/DelegateOptions.hpp b/delegate/common/include/DelegateOptions.hpp
index 3bf9b35..abf446a 100644
--- a/delegate/common/include/DelegateOptions.hpp
+++ b/delegate/common/include/DelegateOptions.hpp
@@ -32,12 +32,12 @@
                     armnn::Optional<armnn::LogSeverity> logSeverityLevel = armnn::EmptyOptional());
 
     DelegateOptions(armnn::Compute computeDevice,
-                    const armnn::OptimizerOptions& optimizerOptions,
+                    const armnn::OptimizerOptionsOpaque& optimizerOptions,
                     const armnn::Optional<armnn::LogSeverity>& logSeverityLevel = armnn::EmptyOptional(),
                     const armnn::Optional<armnn::DebugCallbackFunction>& func = armnn::EmptyOptional());
 
     DelegateOptions(const std::vector<armnn::BackendId>& backends,
-                    const armnn::OptimizerOptions& optimizerOptions,
+                    const armnn::OptimizerOptionsOpaque& optimizerOptions,
                     const armnn::Optional<armnn::LogSeverity>& logSeverityLevel = armnn::EmptyOptional(),
                     const armnn::Optional<armnn::DebugCallbackFunction>& func = armnn::EmptyOptional());
 
@@ -218,9 +218,9 @@
 
     bool IsLoggingEnabled();
 
-    const armnn::OptimizerOptions& GetOptimizerOptions() const;
+    const armnn::OptimizerOptionsOpaque& GetOptimizerOptions() const;
 
-    void SetOptimizerOptions(const armnn::OptimizerOptions& optimizerOptions);
+    void SetOptimizerOptions(const armnn::OptimizerOptionsOpaque& optimizerOptions);
 
     const armnn::Optional<armnn::DebugCallbackFunction>& GetDebugCallbackFunction() const;
 
diff --git a/delegate/common/src/DelegateOptions.cpp b/delegate/common/src/DelegateOptions.cpp
index c4f0ad7..f8892c4 100644
--- a/delegate/common/src/DelegateOptions.cpp
+++ b/delegate/common/src/DelegateOptions.cpp
@@ -32,7 +32,7 @@
     }
 
     explicit DelegateOptionsImpl(armnn::Compute computeDevice,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
             : p_Backends({computeDevice}),
@@ -44,7 +44,7 @@
     }
 
     explicit DelegateOptionsImpl(const std::vector<armnn::BackendId>& backends,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
             : p_Backends(backends),
@@ -66,7 +66,7 @@
     armnn::IRuntime::CreationOptions p_RuntimeOptions;
 
     /// Options for the optimization step for the network
-    armnn::OptimizerOptions p_OptimizerOptions;
+    armnn::OptimizerOptionsOpaque p_OptimizerOptions;
 
     /// Internal profiling options. Written to INetworkProperties during model load.
     /// Indicates whether internal profiling is enabled or not.
@@ -118,7 +118,7 @@
 }
 
 DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
     : p_DelegateOptionsImpl(std::make_unique<DelegateOptionsImpl>(computeDevice, optimizerOptions,
@@ -127,7 +127,7 @@
 }
 
 DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
     : p_DelegateOptionsImpl(std::make_unique<DelegateOptionsImpl>(backends, optimizerOptions,
@@ -142,7 +142,7 @@
     : p_DelegateOptionsImpl(std::make_unique<DelegateOptionsImpl>())
 {
     armnn::IRuntime::CreationOptions runtimeOptions;
-    armnn::OptimizerOptions optimizerOptions;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
     bool internalProfilingState = false;
     armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
     for (size_t i = 0; i < num_options; ++i)
@@ -182,7 +182,7 @@
         {
             armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath",
                                                      std::string(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(option);
+            optimizerOptions.AddModelOption(option);
         }
         else if (std::string(options_keys[i]) == std::string("gpu-tuning-file"))
         {
@@ -204,24 +204,24 @@
         {
             armnn::BackendOptions option("GpuAcc", {{"SaveCachedNetwork",
                                                      armnn::stringUtils::StringToBool(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(option);
+            optimizerOptions.AddModelOption(option);
         }
         else if (std::string(options_keys[i]) == std::string("cached-network-filepath"))
         {
             armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath",
                                                      std::string(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(option);
+            optimizerOptions.AddModelOption(option);
         }
             // Process GPU & CPU backend options
         else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
         {
             armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled",
-                                                     armnn::stringUtils::StringToBool(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(modelOptionGpu);
+                                                 armnn::stringUtils::StringToBool(options_values[i])}});
+            optimizerOptions.AddModelOption(modelOptionGpu);
 
             armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled",
-                                                     armnn::stringUtils::StringToBool(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(modelOptionCpu);
+                                                 armnn::stringUtils::StringToBool(options_values[i])}});
+            optimizerOptions.AddModelOption(modelOptionCpu);
         }
             // Process CPU backend options
         else if (std::string(options_keys[i]) == std::string("number-of-threads"))
@@ -229,17 +229,17 @@
             unsigned int numberOfThreads = armnn::numeric_cast<unsigned int>(atoi(options_values[i]));
             armnn::BackendOptions modelOption("CpuAcc",
                                               {{"NumberOfThreads", numberOfThreads}});
-            optimizerOptions.m_ModelOptions.push_back(modelOption);
+            optimizerOptions.AddModelOption(modelOption);
         }
             // Process reduce-fp32-to-fp16 option
         else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
         {
-            optimizerOptions.m_ReduceFp32ToFp16 = armnn::stringUtils::StringToBool(options_values[i]);
+            optimizerOptions.SetReduceFp32ToFp16(armnn::stringUtils::StringToBool(options_values[i]));
         }
             // Process debug-data
         else if (std::string(options_keys[i]) == std::string("debug-data"))
         {
-            optimizerOptions.m_Debug = armnn::stringUtils::StringToBool(options_values[i]);
+            optimizerOptions.SetDebugEnabled(armnn::stringUtils::StringToBool(options_values[i]));
         }
             // Infer output-shape
         else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
@@ -248,7 +248,7 @@
             {
                 { "InferAndValidate", armnn::stringUtils::StringToBool(options_values[i]) }
             });
-            optimizerOptions.m_ModelOptions.push_back(backendOption);
+            optimizerOptions.AddModelOption(backendOption);
         }
             // Allow expanded dims
         else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
@@ -257,18 +257,18 @@
             {
                 { "AllowExpandedDims", armnn::stringUtils::StringToBool(options_values[i]) }
             });
-            optimizerOptions.m_ModelOptions.push_back(backendOption);
+            optimizerOptions.AddModelOption(backendOption);
         }
             // Process memory-import
         else if (std::string(options_keys[i]) == std::string("memory-import"))
         {
-            optimizerOptions.m_ImportEnabled = armnn::stringUtils::StringToBool(options_values[i]);
+            optimizerOptions.SetImportEnabled(armnn::stringUtils::StringToBool(options_values[i]));
         }
             // Process enable-internal-profiling
         else if (std::string(options_keys[i]) == std::string("enable-internal-profiling"))
         {
             internalProfilingState = *options_values[i] != '0';
-            optimizerOptions.m_ProfilingEnabled = internalProfilingState;
+            optimizerOptions.SetProfilingEnabled(internalProfilingState);
         }
             // Process internal-profiling-detail
         else if (std::string(options_keys[i]) == std::string("internal-profiling-detail"))
@@ -312,7 +312,8 @@
         // Process file-only-external-profiling
         else if (std::string(options_keys[i]) == std::string("file-only-external-profiling"))
         {
-            runtimeOptions.m_ProfilingOptions.m_FileOnly = armnn::stringUtils::StringToBool(options_values[i]);
+            runtimeOptions.m_ProfilingOptions.m_FileOnly =
+                    armnn::stringUtils::StringToBool(options_values[i]);
         }
         // Process counter-capture-period
         else if (std::string(options_keys[i]) == std::string("counter-capture-period"))
@@ -408,12 +409,12 @@
     return p_DelegateOptionsImpl->m_LoggingSeverity.has_value();
 }
 
-const armnn::OptimizerOptions& DelegateOptions::GetOptimizerOptions() const
+const armnn::OptimizerOptionsOpaque& DelegateOptions::GetOptimizerOptions() const
 {
     return p_DelegateOptionsImpl->p_OptimizerOptions;
 }
 
-void DelegateOptions::SetOptimizerOptions(const armnn::OptimizerOptions& optimizerOptions)
+void DelegateOptions::SetOptimizerOptions(const armnn::OptimizerOptionsOpaque& optimizerOptions)
 {
     p_DelegateOptionsImpl->p_OptimizerOptions = optimizerOptions;
 }
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index cfaea01..ee1a4ed 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -400,7 +400,7 @@
     DelegateData delegateData(delegate->m_Options.GetBackends());
 
     // Build ArmNN Network
-    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
+    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
     armnn::NetworkId networkId;
     delegateData.m_Network = armnn::INetwork::Create(networkOptions);
 
@@ -490,11 +490,11 @@
         armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
         armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
         // There's a bit of an assumption here that the delegate will only support Malloc memory source.
-        if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
+        if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
         {
             inputSource = armnn::MemorySource::Malloc;
         }
-        if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
+        if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
         {
             outputSource = armnn::MemorySource::Malloc;
         }
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
index d84d420..fd1ef88 100644
--- a/delegate/test/DelegateOptionsTest.cpp
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -26,7 +26,7 @@
         std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
         // Enable ReduceFp32ToFp16
-        armnn::OptimizerOptions optimizerOptions(true, true, false, false);
+        armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, false);
         armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
         DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
@@ -55,7 +55,7 @@
         std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
         // Enable Debug
-        armnn::OptimizerOptions optimizerOptions(false, true, false, false);
+        armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
         armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
         DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
@@ -83,7 +83,7 @@
     std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
     // Enable debug with debug callback function
-    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
     bool callback = false;
     auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
     {
@@ -121,7 +121,7 @@
     std::vector<uint8_t> divData = { 2, 2, 3, 4 };
     std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
 
-    armnn::OptimizerOptions optimizerOptions(false, false, false, true);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false, true);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
@@ -227,7 +227,8 @@
                                  });
     modelOptions.push_back(cpuAcc);
 
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
+                                                   false, modelOptions, false);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
@@ -256,7 +257,8 @@
         std::vector<float> divData = { 2, 2, 3, 4 };
         std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
-        armnn::OptimizerOptions optimizerOptions(false, false, false, false);
+        armnn::OptimizerOptionsOpaque optimizerOptions(false, false,
+                                                       false, false);
         armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
         // Enable serialize to dot by specifying the target file name.
         delegateOptions.SetSerializeToDot(filename);
@@ -299,7 +301,8 @@
         options_values.get()[i] = values[i].c_str();
     }
 
-    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(),
+                                                   num_options, nullptr);
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                               tensorShape,
                               inputData,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 2737537..819f5cb 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -136,53 +136,53 @@
     ~IConnectableLayer() {}
 };
 
-
-/// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptions provides
-/// a set of features that allows the user to customize this optimization on a per model basis.
 struct OptimizerOptions
 {
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
     OptimizerOptions()
-        : m_ReduceFp32ToFp16(false)
-        , m_Debug(false)
-        , m_DebugToFile(false)
-        , m_ReduceFp32ToBf16(false)
-        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
-        , m_ImportEnabled(false)
-        , m_ModelOptions()
-        , m_ProfilingEnabled(false)
-        , m_ExportEnabled(false)
-        , m_AllowExpandedDims(false)
+            : m_ReduceFp32ToFp16(false)
+            , m_Debug(false)
+            , m_DebugToFile(false)
+            , m_ReduceFp32ToBf16(false)
+            , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+            , m_ImportEnabled(false)
+            , m_ModelOptions()
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(false)
+            , m_AllowExpandedDims(false)
     {}
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
                      ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false)
-        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
-        , m_Debug(debug)
-        , m_DebugToFile(debugToFile)
-        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
-        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
-        , m_ImportEnabled(importEnabled)
-        , m_ModelOptions(modelOptions)
-        , m_ProfilingEnabled(false)
-        , m_ExportEnabled(exportEnabled)
-        , m_AllowExpandedDims(false)
+            : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+            , m_Debug(debug)
+            , m_DebugToFile(debugToFile)
+            , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+            , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+            , m_ImportEnabled(importEnabled)
+            , m_ModelOptions(modelOptions)
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(exportEnabled)
+            , m_AllowExpandedDims(false)
     {
     }
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                      bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
                      bool debugToFile = false, bool allowExpandedDims = false)
-        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
-        , m_Debug(debug)
-        , m_DebugToFile(debugToFile)
-        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
-        , m_shapeInferenceMethod(shapeInferenceMethod)
-        , m_ImportEnabled(importEnabled)
-        , m_ModelOptions(modelOptions)
-        , m_ProfilingEnabled(false)
-        , m_ExportEnabled(exportEnabled)
-        , m_AllowExpandedDims(allowExpandedDims)
+            : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+            , m_Debug(debug)
+            , m_DebugToFile(debugToFile)
+            , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+            , m_shapeInferenceMethod(shapeInferenceMethod)
+            , m_ImportEnabled(importEnabled)
+            , m_ModelOptions(modelOptions)
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(exportEnabled)
+            , m_AllowExpandedDims(allowExpandedDims)
     {
     }
 
@@ -195,7 +195,8 @@
         stream << "\tDebug: " << m_Debug << "\n";
         stream << "\tDebug to file: " << m_DebugToFile << "\n";
         stream << "\tShapeInferenceMethod: " <<
-        (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
+               (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly
+               ? "ValidateOnly" : "InferAndValidate") << "\n";
         stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
         stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
         stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
@@ -252,6 +253,75 @@
     bool m_AllowExpandedDims;
 };
 
+struct OptimizerOptionsOpaqueImpl;
+
+/// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptionsOpaque
+/// provides a set of features that allows the user to customize this optimization on a per model basis.
+class OptimizerOptionsOpaque
+{
+public:
+    OptimizerOptionsOpaque();
+    OptimizerOptionsOpaque(const OptimizerOptionsOpaque& other);
+    ~OptimizerOptionsOpaque();
+
+    OptimizerOptionsOpaque(const OptimizerOptions& OptimizerStruct);
+
+    OptimizerOptionsOpaque& operator=(OptimizerOptionsOpaque other);
+
+    OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
+                           ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false);
+
+    OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
+                           ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
+                           bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
+                           bool debugToFile = false, bool allowExpandedDims = false);
+
+    const std::string ToString() const;
+
+    bool GetProfilingEnabled() const;
+
+    bool GetImportEnabled() const;
+
+    bool GetExportEnabled() const;
+
+    bool GetReduceFp32ToFp16() const;
+
+    bool GetReduceFp32ToBf16() const;
+
+    bool GetDebugEnabled() const;
+
+    bool GetDebugToFileEnabled() const;
+
+    bool GetAllowExpandedDims() const;
+
+    armnn::ModelOptions GetModelOptions() const;
+
+    armnn::ShapeInferenceMethod GetShapeInferenceMethod() const;
+
+    void SetImportEnabled(bool ImportState);
+
+    void SetExportEnabled(bool ExportState);
+
+    void SetProfilingEnabled(bool ProfilingState);
+
+    void SetDebugEnabled(bool DebugState);
+
+    void SetDebugToFileEnabled(bool DebugFileState);
+
+    void SetReduceFp32ToFp16(bool ReduceFp32ToFp16State);
+
+    void SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType);
+
+    void AddModelOption(armnn::BackendOptions);
+
+    void SetAllowExpandedDims(bool ExpandedDimsAllowed);
+
+private:
+
+    std::unique_ptr<armnn::OptimizerOptionsOpaqueImpl> p_OptimizerOptionsImpl;
+
+};
+
 class IWorkloadFactory;
 class NetworkImpl;
 using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
@@ -768,6 +838,11 @@
                                          const IDeviceSpec& deviceSpec,
                                          const OptimizerOptions& options,
                                          Optional<std::vector<std::string>&> messages);
+    friend IOptimizedNetworkPtr Optimize(const INetwork& network,
+                                         const std::vector<BackendId>& backendPreferences,
+                                         const IDeviceSpec& deviceSpec,
+                                         const OptimizerOptionsOpaque& options,
+                                         Optional<std::vector<std::string>&> messages);
 
     INetwork(NetworkOptions networkOptions = {});
 
@@ -819,12 +894,12 @@
     friend IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                                          const std::vector<BackendId>& backendPreferences,
                                          const IDeviceSpec& deviceSpec,
-                                         const OptimizerOptions& options,
+                                         const OptimizerOptionsOpaque& options,
                                          Optional<std::vector<std::string>&> messages);
     friend IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                                          const std::vector<BackendId>& backendPreferences,
                                          const IDeviceSpec& deviceSpec,
-                                         const OptimizerOptions& options,
+                                         const OptimizerOptionsOpaque& options,
                                          Optional<std::vector<std::string>&> messages);
 
     IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
@@ -844,7 +919,7 @@
 IOptimizedNetworkPtr Optimize(const INetwork& network,
                               const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
-                              const OptimizerOptions& options = OptimizerOptions(),
+                              const OptimizerOptionsOpaque& options = OptimizerOptionsOpaque(),
                               Optional<std::vector<std::string>&> messages = EmptyOptional());
 
 /// Create an optimized version of the network
@@ -859,6 +934,21 @@
 IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                               const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
+                              const OptimizerOptionsOpaque& options,
+                              Optional<std::vector<std::string>&> messages = EmptyOptional());
+
+/// Accept legacy OptimizerOptions
+IOptimizedNetworkPtr Optimize(const Graph& inGraph,
+                              const std::vector<BackendId>& backendPreferences,
+                              const IDeviceSpec& deviceSpec,
                               const OptimizerOptions& options,
                               Optional<std::vector<std::string>&> messages = EmptyOptional());
+
+/// Accept legacy OptimizerOptions
+IOptimizedNetworkPtr Optimize(const INetwork& network,
+                              const std::vector<BackendId>& backendPreferences,
+                              const IDeviceSpec& deviceSpec,
+                              const OptimizerOptions& options,
+                              Optional<std::vector<std::string>&> messages = EmptyOptional());
+
 } //namespace armnn
diff --git a/samples/CustomMemoryAllocatorSample.cpp b/samples/CustomMemoryAllocatorSample.cpp
index da249e0..14c779e 100644
--- a/samples/CustomMemoryAllocatorSample.cpp
+++ b/samples/CustomMemoryAllocatorSample.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -121,8 +121,8 @@
     fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // Optimise ArmNN network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
     IOptimizedNetworkPtr optNet =
                 Optimize(*network, {"GpuAcc"}, runtime->GetDeviceSpec(), optOptions);
     if (!optNet)
diff --git a/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp b/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp
index c8875a2..557ec8a 100644
--- a/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp
+++ b/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -104,7 +104,7 @@
                                            m_profiling(isProfilingEnabled)
 {
     m_profiling.ProfilingStart();
-    armnn::OptimizerOptions optimizerOptions;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
     m_model = tflite::FlatBufferModel::BuildFromFile(modelPath.c_str());
     if (m_model == nullptr)
     {
@@ -130,12 +130,12 @@
 
     /* enable fast math optimization */
     armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled", true}});
-    optimizerOptions.m_ModelOptions.push_back(modelOptionGpu);
+    optimizerOptions.AddModelOption(modelOptionGpu);
 
     armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled", true}});
-    optimizerOptions.m_ModelOptions.push_back(modelOptionCpu);
+    optimizerOptions.AddModelOption(modelOptionCpu);
     /* enable reduce float32 to float16 optimization */
-    optimizerOptions.m_ReduceFp32ToFp16 = true;
+    optimizerOptions.SetReduceFp32ToFp16(true);
 
     armnnDelegate::DelegateOptions delegateOptions(preferredBackends, optimizerOptions);
 
diff --git a/shim/sl/canonical/ArmnnDriverImpl.cpp b/shim/sl/canonical/ArmnnDriverImpl.cpp
index 0c98a16..0f7888b 100644
--- a/shim/sl/canonical/ArmnnDriverImpl.cpp
+++ b/shim/sl/canonical/ArmnnDriverImpl.cpp
@@ -144,9 +144,9 @@
 
     // Optimize the network
     armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
-    armnn::OptimizerOptions OptOptions;
-    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
-    OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();
+    armnn::OptimizerOptionsOpaque OptOptions;
+    OptOptions.SetReduceFp32ToFp16(float32ToFloat16);
+    OptOptions.SetProfilingEnabled(options.IsGpuProfilingEnabled());
 
     int cachedFd = -1;
     bool saveCachedNetwork = options.SaveCachedNetwork();
@@ -188,8 +188,8 @@
         { "FastMathEnabled", options.IsFastMathEnabled() },
         { "NumberOfThreads", options.GetNumberOfThreads() }
     });
-    OptOptions.m_ModelOptions.push_back(gpuAcc);
-    OptOptions.m_ModelOptions.push_back(cpuAcc);
+    OptOptions.AddModelOption(gpuAcc);
+    OptOptions.AddModelOption(cpuAcc);
 
     std::vector<std::string> errMessages;
     try
@@ -464,9 +464,9 @@
 
     // Optimize the network
     armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
-    armnn::OptimizerOptions OptOptions;
-    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
-    OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();
+    armnn::OptimizerOptionsOpaque OptOptions;
+    OptOptions.SetReduceFp32ToFp16(float32ToFloat16);
+    OptOptions.SetProfilingEnabled(options.IsGpuProfilingEnabled());
 
     armnn::BackendOptions gpuAcc("GpuAcc",
     {
@@ -482,8 +482,8 @@
         { "FastMathEnabled", options.IsFastMathEnabled() },
         { "NumberOfThreads", options.GetNumberOfThreads() }
     });
-    OptOptions.m_ModelOptions.push_back(gpuAcc);
-    OptOptions.m_ModelOptions.push_back(cpuAcc);
+    OptOptions.AddModelOption(gpuAcc);
+    OptOptions.AddModelOption(cpuAcc);
 
     std::vector<std::string> errMessages;
     try
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4b89daf..a069585 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -45,6 +45,194 @@
 
 INetwork::~INetwork() = default;
 
+OptimizerOptionsOpaque::OptimizerOptionsOpaque()
+        : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>())
+{
+}
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(OptimizerOptionsOpaque const &other)
+        : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(*other.p_OptimizerOptionsImpl))
+{
+}
+
+OptimizerOptionsOpaque::~OptimizerOptionsOpaque() = default;
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+                                               bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
+                                               bool debugToFile)
+        : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(reduceFp32ToFp16, debug, reduceFp32ToBf16,
+                                                                              importEnabled, modelOptions,
+                                                                              exportEnabled, debugToFile))
+{
+}
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+                                               ShapeInferenceMethod shapeInferenceMethod,
+                                               bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
+                                               bool debugToFile, bool allowExpandedDims)
+        : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(reduceFp32ToFp16, debug, reduceFp32ToBf16,
+                                                                              shapeInferenceMethod, importEnabled,
+                                                                              modelOptions, exportEnabled,
+                                                                              debugToFile, allowExpandedDims))
+{
+}
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(const OptimizerOptions& OptimizerStruct)
+    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>())
+{
+    p_OptimizerOptionsImpl->m_ImportEnabled = OptimizerStruct.m_ImportEnabled;
+    p_OptimizerOptionsImpl->m_shapeInferenceMethod = OptimizerStruct.m_shapeInferenceMethod;
+    p_OptimizerOptionsImpl->m_ModelOptions = OptimizerStruct.m_ModelOptions;
+    p_OptimizerOptionsImpl->m_ProfilingEnabled = OptimizerStruct.m_ProfilingEnabled;
+    p_OptimizerOptionsImpl->m_DebugToFile = OptimizerStruct.m_DebugToFile;
+    p_OptimizerOptionsImpl->m_Debug = OptimizerStruct.m_Debug;
+    p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = OptimizerStruct.m_ReduceFp32ToFp16;
+    p_OptimizerOptionsImpl->m_ExportEnabled = OptimizerStruct.m_ExportEnabled;
+    p_OptimizerOptionsImpl->m_AllowExpandedDims = OptimizerStruct.m_AllowExpandedDims;
+    p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 = OptimizerStruct.m_ReduceFp32ToBf16;
+}
+
+OptimizerOptionsOpaque& OptimizerOptionsOpaque::operator= (OptimizerOptionsOpaque other)
+{
+    p_OptimizerOptionsImpl->m_ImportEnabled = other.GetImportEnabled();
+    p_OptimizerOptionsImpl->m_shapeInferenceMethod = other.GetShapeInferenceMethod();
+    p_OptimizerOptionsImpl->m_ModelOptions = other.GetModelOptions();
+    p_OptimizerOptionsImpl->m_ProfilingEnabled = other.GetProfilingEnabled();
+    p_OptimizerOptionsImpl->m_DebugToFile = other.GetDebugToFileEnabled();
+    p_OptimizerOptionsImpl->m_Debug = other.GetDebugEnabled();
+    p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = other.GetReduceFp32ToFp16();
+    p_OptimizerOptionsImpl->m_ExportEnabled = other.GetExportEnabled();
+    p_OptimizerOptionsImpl->m_AllowExpandedDims = other.GetAllowExpandedDims();
+    p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 = other.GetReduceFp32ToBf16();
+    return *this;
+}
+
+void OptimizerOptionsOpaque::SetImportEnabled(bool ImportState)
+{
+    p_OptimizerOptionsImpl->m_ImportEnabled = ImportState;
+}
+
+void OptimizerOptionsOpaque::SetExportEnabled(bool ExportState)
+{
+    p_OptimizerOptionsImpl->m_ExportEnabled = ExportState;
+}
+
+void OptimizerOptionsOpaque::SetProfilingEnabled(bool ProfilingState)
+{
+    p_OptimizerOptionsImpl->m_ProfilingEnabled = ProfilingState;
+}
+
+void OptimizerOptionsOpaque::SetDebugEnabled(bool DebugState)
+{
+    p_OptimizerOptionsImpl->m_Debug = DebugState;
+}
+
+void OptimizerOptionsOpaque::SetDebugToFileEnabled(bool DebugFileState)
+{
+    p_OptimizerOptionsImpl->m_DebugToFile = DebugFileState;
+}
+
+void OptimizerOptionsOpaque::SetReduceFp32ToFp16(bool ReduceFp32ToFp16State)
+{
+    p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = ReduceFp32ToFp16State;
+}
+
+void OptimizerOptionsOpaque::SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType)
+{
+    p_OptimizerOptionsImpl->m_shapeInferenceMethod = ShapeInferenceMethodType;
+}
+
+void OptimizerOptionsOpaque::SetAllowExpandedDims(bool ExpandedDimsAllowed)
+{
+    p_OptimizerOptionsImpl->m_AllowExpandedDims = ExpandedDimsAllowed;
+}
+
+void OptimizerOptionsOpaque::AddModelOption(armnn::BackendOptions NewModelOption)
+{
+    p_OptimizerOptionsImpl->m_ModelOptions.push_back(NewModelOption);
+}
+
+bool OptimizerOptionsOpaque::GetProfilingEnabled() const
+{
+    return p_OptimizerOptionsImpl->m_ProfilingEnabled;
+}
+
+bool OptimizerOptionsOpaque::GetImportEnabled() const
+{
+    return p_OptimizerOptionsImpl->m_ImportEnabled;
+}
+
+bool OptimizerOptionsOpaque::GetExportEnabled() const
+{
+    return p_OptimizerOptionsImpl->m_ExportEnabled;
+}
+
+bool OptimizerOptionsOpaque::GetReduceFp32ToFp16() const
+{
+    return p_OptimizerOptionsImpl->m_ReduceFp32ToFp16;
+}
+
+bool OptimizerOptionsOpaque::GetReduceFp32ToBf16() const
+{
+    return p_OptimizerOptionsImpl->m_ReduceFp32ToBf16;
+}
+
+bool OptimizerOptionsOpaque::GetDebugEnabled() const
+{
+    return p_OptimizerOptionsImpl->m_Debug;
+}
+
+bool OptimizerOptionsOpaque::GetDebugToFileEnabled() const
+{
+    return p_OptimizerOptionsImpl->m_DebugToFile;
+}
+
+bool OptimizerOptionsOpaque::GetAllowExpandedDims() const
+{
+    return p_OptimizerOptionsImpl->m_AllowExpandedDims;
+}
+
+armnn::ModelOptions OptimizerOptionsOpaque::GetModelOptions() const
+{
+    return p_OptimizerOptionsImpl->m_ModelOptions;
+}
+
+armnn::ShapeInferenceMethod OptimizerOptionsOpaque::GetShapeInferenceMethod() const
+{
+    return p_OptimizerOptionsImpl->m_shapeInferenceMethod;
+}
+
+const std::string OptimizerOptionsOpaque::ToString() const
+{
+    std::stringstream stream;
+    stream << "OptimizerOptions: \n";
+    stream << "\tReduceFp32ToFp16: " << p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 << "\n";
+    stream << "\tReduceFp32ToBf16: " << p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 << "\n";
+    stream << "\tDebug: " << p_OptimizerOptionsImpl->m_Debug << "\n";
+    stream << "\tDebug to file: " << p_OptimizerOptionsImpl->m_DebugToFile << "\n";
+    stream << "\tShapeInferenceMethod: " <<
+           (p_OptimizerOptionsImpl->m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ?
+           "ValidateOnly" : "InferAndValidate") << "\n";
+    stream << "\tImportEnabled: " << p_OptimizerOptionsImpl->m_ImportEnabled << "\n";
+    stream << "\tExportEnabled: " << p_OptimizerOptionsImpl->m_ExportEnabled << "\n";
+    stream << "\tProfilingEnabled: " << p_OptimizerOptionsImpl->m_ProfilingEnabled << "\n";
+    stream << "\tAllowExpandedDims: " << p_OptimizerOptionsImpl->m_AllowExpandedDims << "\n";
+
+    stream << "\tModelOptions: \n";
+    for (auto optionsGroup : p_OptimizerOptionsImpl->m_ModelOptions)
+    {
+        for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
+        {
+            const armnn::BackendOptions::BackendOption option = optionsGroup.GetOption(i);
+            stream << "\t\tBackend: "  << optionsGroup.GetBackendId() << "\n"
+                   << "\t\t\tOption: " << option.GetName() << "\n"
+                   << "\t\t\tValue: "  << std::string(option.GetValue().ToString()) << "\n";
+        }
+    }
+
+    return stream.str();
+}
+
 Status INetwork::PrintGraph()
 {
     return pNetworkImpl->PrintGraph();
@@ -1581,18 +1769,32 @@
     return result;
 }
 
+// Forwarding function to remain backward compatible with legacy OptimizerOptions
 IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                               const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
                               const OptimizerOptions& options,
                               Optional<std::vector<std::string>&> messages)
 {
+    return Optimize(inGraph,
+                    backendPreferences,
+                    deviceSpec,
+                    OptimizerOptionsOpaque(options),
+                    messages);
+}
+
+IOptimizedNetworkPtr Optimize(const Graph& inGraph,
+                              const std::vector<BackendId>& backendPreferences,
+                              const IDeviceSpec& deviceSpec,
+                              const OptimizerOptionsOpaque& options,
+                              Optional<std::vector<std::string>&> messages)
+{
     ARMNN_LOG(debug) << options.ToString();
 
     // Enable profiling
     auto profiler = inGraph.GetProfiler();
     ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
-    profiler->EnableProfiling(options.m_ProfilingEnabled);
+    profiler->EnableProfiling(options.GetProfilingEnabled());
 
     ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer");
     if (backendPreferences.empty())
@@ -1600,13 +1802,13 @@
         throw InvalidArgumentException("Invoked Optimize with no backends specified");
     }
 
-    if (options.m_ReduceFp32ToBf16)
+    if (options.GetReduceFp32ToBf16())
     {
         throw InvalidArgumentException("BFloat16 optimization is currently ignored. In order to use Bf16 optimization "
                                        "Please use the FastMathEnabled backend option for CpuAcc or GpuAcc.");
     }
 
-    if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
+    if (options.GetReduceFp32ToFp16() && options.GetReduceFp32ToBf16())
     {
         throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
     }
@@ -1619,9 +1821,9 @@
     // We need to pass on the information about whether import and export is enabled to the LoadNetwork phase.
     // The mechanism to do that is to add model options to the optimized network.
     armnn::BackendOptions importExport("Global",
-                                        {{"ImportEnabled", options.m_ImportEnabled},
-                                         {"ExportEnabled", options.m_ExportEnabled}});
-    ModelOptions optimizedOptions(options.m_ModelOptions);
+                                        {{"ImportEnabled", options.GetImportEnabled()},
+                                         {"ExportEnabled", options.GetExportEnabled()}});
+    ModelOptions optimizedOptions(options.GetModelOptions());
     optimizedOptions.push_back(importExport);
 
     auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
@@ -1632,7 +1834,7 @@
     // Get the optimized graph
     Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
 
-    if(options.m_shapeInferenceMethod == ShapeInferenceMethod::InferAndValidate)
+    if(options.GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
     {
         // Infer the tensor infos for all output slots. Throws an exception on failure
         optGraph.InferTensorInfos();
@@ -1642,7 +1844,7 @@
     using namespace optimizations;
     Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
-    if(options.m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
+    if(options.GetShapeInferenceMethod() == ShapeInferenceMethod::ValidateOnly)
     {
         // Validate the tensor infos for all output slots. Throws an exception on failure
         optGraph.InferTensorInfos();
@@ -1677,8 +1879,8 @@
                                                 FuseBatchNormIntoDepthwiseConvolution2DFloat32(),
                                                 FuseBatchNormIntoDepthwiseConvolution2DFloat16()));
 
-    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
-    if (options.m_ReduceFp32ToFp16)
+    // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
+    if (options.GetReduceFp32ToFp16())
     {
         ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ReduceFp32ToFp16");
         Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
@@ -1721,7 +1923,7 @@
     OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr->pOptimizedNetworkImpl.get(),
                                                                              backendSettings,
                                                                              backends,
-                                                                             options.m_ModelOptions,
+                                                                             options.GetModelOptions(),
                                                                              messages);
     if (backendOptimizationResult.m_Error)
     {
@@ -1739,11 +1941,11 @@
     // This must occur after all topological changes to the graph and any redirection of variables
     // If the debug flag is set, then insert a DebugLayer after each layer
     // Doing this after applying the backend optimizations as they might have changed some layers
-    if (options.m_Debug && !options.m_DebugToFile)
+    if (options.GetDebugEnabled() && !options.GetDebugToFileEnabled())
     {
         Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
     }
-    else if (options.m_DebugToFile)
+    else if (options.GetDebugToFileEnabled())
     {
         // Setup the output file path
         try
@@ -1763,8 +1965,8 @@
     OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                    backends,
                                                                    tensorHandleFactoryRegistry,
-                                                                   options.m_ImportEnabled,
-                                                                   options.m_ExportEnabled,
+                                                                   options.GetImportEnabled(),
+                                                                   options.GetExportEnabled(),
                                                                    messages);
 
     if (strategyResult.m_Error)
@@ -1782,12 +1984,26 @@
     return optNet;
 }
 
+// Forwarding function to remain backward compatible with legacy OptimizerOptions
 IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                               const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
                               const OptimizerOptions& options,
                               Optional<std::vector<std::string>&> messages)
 {
+    return Optimize(inNetwork,
+                    backendPreferences,
+                    deviceSpec,
+                    OptimizerOptionsOpaque(options),
+                    messages);
+}
+
+IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
+                              const std::vector<BackendId>& backendPreferences,
+                              const IDeviceSpec& deviceSpec,
+                              const OptimizerOptionsOpaque& options,
+                              Optional<std::vector<std::string>&> messages)
+{
     return Optimize(inNetwork.pNetworkImpl->GetGraph(),
                     backendPreferences,
                     deviceSpec,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c6bf085..eced458 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -23,6 +23,7 @@
 
 namespace armnn
 {
+
 class Graph;
 
 using NetworkImplPtr = std::unique_ptr<NetworkImpl, void (*)(NetworkImpl* network)>;
@@ -292,4 +293,91 @@
                                   SubgraphView::IConnectableLayerIterator& lastLayer,
                                   Optional<std::vector<std::string>&> errMessages);
 
+struct OptimizerOptionsOpaqueImpl
+{
+    ~OptimizerOptionsOpaqueImpl() = default;
+
+    explicit OptimizerOptionsOpaqueImpl()
+            : m_ReduceFp32ToFp16(false)
+            , m_Debug(false)
+            , m_DebugToFile(false)
+            , m_ReduceFp32ToBf16(false)
+            , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+            , m_ImportEnabled(false)
+            , m_ModelOptions()
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(false)
+            , m_AllowExpandedDims(false)
+    {
+    }
+
+    explicit OptimizerOptionsOpaqueImpl(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+                                        bool importEnabled, ModelOptions modelOptions = {},
+                                        bool exportEnabled = false, bool debugToFile = false)
+            : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+            , m_Debug(debug)
+            , m_DebugToFile(debugToFile)
+            , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+            , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+            , m_ImportEnabled(importEnabled)
+            , m_ModelOptions(modelOptions)
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(exportEnabled)
+            , m_AllowExpandedDims(false)
+    {
+    }
+
+    explicit OptimizerOptionsOpaqueImpl(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+                                        ShapeInferenceMethod shapeInferenceMethod,
+                                        bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
+                                        bool debugToFile, bool allowExpandedDims)
+            : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+            , m_Debug(debug)
+            , m_DebugToFile(debugToFile)
+            , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+            , m_shapeInferenceMethod(shapeInferenceMethod)
+            , m_ImportEnabled(importEnabled)
+            , m_ModelOptions(modelOptions)
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(exportEnabled)
+            , m_AllowExpandedDims(allowExpandedDims)
+    {
+    }
+
+    /// Reduces all Fp32 operators in the model to Fp16 for faster processing.
+    /// @Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers
+    ///       between layers that weren't in Fp32 in the first place or if the operator is not supported in Fp16.
+    ///       The overhead of these conversions can lead to a slower overall performance if too many conversions are
+    ///       required.
+    bool m_ReduceFp32ToFp16 = false;
+
+    /// Add debug data for easier troubleshooting
+    bool m_Debug = false;
+
+    /// Pass debug data to separate output files for easier troubleshooting
+    bool m_DebugToFile = false;
+
+    /// @Note This feature has been replaced by enabling Fast Math in compute library backend options.
+    /// This is currently a placeholder option
+    bool m_ReduceFp32ToBf16 = false;
+
+    /// Infer output size when not available
+    ShapeInferenceMethod m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
+
+    /// Enable Import
+    bool m_ImportEnabled = false;
+
+    /// Enable Model Options
+    ModelOptions m_ModelOptions;
+
+    /// Enable profiling dump of the optimizer phase
+    bool m_ProfilingEnabled = false;
+
+    /// Enable Export
+    bool m_ExportEnabled = false;
+
+    /// When calculating tensor sizes, dimensions of size == 1 will be ignored
+    bool m_AllowExpandedDims = false;
+};
+
 } // namespace armnn
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index 600447c..e5e7930 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -47,7 +47,7 @@
     IRuntimePtr runtime(IRuntime::Create(options));
 
     // Optimize the network with debug option
-    OptimizerOptions optimizerOptions(false, true);
+    OptimizerOptionsOpaque optimizerOptions(false, true);
     std::vector<BackendId> backends = { "CpuRef" };
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index 8a64a4b..17e4666 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -46,7 +46,7 @@
 
     try
     {
-        Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
+        Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptionsOpaque(), errMessages);
         FAIL("Should have thrown an exception.");
     }
     catch (const InvalidArgumentException&)
diff --git a/src/armnn/test/FlowControl.cpp b/src/armnn/test/FlowControl.cpp
index cdd86c06..563968a 100644
--- a/src/armnn/test/FlowControl.cpp
+++ b/src/armnn/test/FlowControl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -55,7 +55,7 @@
 
     try
     {
-        Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
+        Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptionsOpaque(), errMessages);
         FAIL("Should have thrown an exception.");
     }
     catch (const InvalidArgumentException&)
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 427352a..6768444 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -543,7 +543,7 @@
         armnn::IOptimizedNetworkPtr optNet = Optimize(*net,
                                                       backends,
                                                       runtime->GetDeviceSpec(),
-                                                      OptimizerOptions(),
+                                                      OptimizerOptionsOpaque(),
                                                       errMessages);
         FAIL("An exception should have been thrown");
     }
@@ -1327,10 +1327,10 @@
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
-    OptimizerOptions optimizedOptions;
+    OptimizerOptionsOpaque optimizedOptions;
     // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = false;
+    optimizedOptions.SetImportEnabled(false);
+    optimizedOptions.SetExportEnabled(false);
     IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
@@ -1372,10 +1372,10 @@
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
-    OptimizerOptions optimizedOptions;
+    OptimizerOptionsOpaque optimizedOptions;
     // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = false;
+    optimizedOptions.SetImportEnabled(false);
+    optimizedOptions.SetExportEnabled(false);
     IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
@@ -1417,10 +1417,10 @@
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
-    OptimizerOptions optimizedOptions;
+    OptimizerOptionsOpaque optimizedOptions;
     // Hard set import to on and export to off.
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = false;
+    optimizedOptions.SetImportEnabled(true);
+    optimizedOptions.SetExportEnabled(false);
     IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
@@ -1462,10 +1462,10 @@
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
-    OptimizerOptions optimizedOptions;
+    OptimizerOptionsOpaque optimizedOptions;
     // Hard set import to off and export to on.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = true;
+    optimizedOptions.SetImportEnabled(false);
+    optimizedOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
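The same mechanical change recurs throughout these tests: assignments to the old public m_ImportEnabled / m_ExportEnabled members become setter calls on OptimizerOptionsOpaque. A minimal caller-side sketch follows; OptimizeWithImport is a hypothetical helper, and the armnn/ArmNN.hpp umbrella include is an assumption about where the declarations come in from.

    #include <vector>
    #include <armnn/ArmNN.hpp>   // assumed umbrella header for INetwork, Optimize and the options type

    armnn::IOptimizedNetworkPtr OptimizeWithImport(const armnn::INetwork& net,
                                                   const std::vector<armnn::BackendId>& backends,
                                                   armnn::IRuntime& runtime)
    {
        // Before this change: armnn::OptimizerOptions options; options.m_ImportEnabled = true;
        armnn::OptimizerOptionsOpaque options;   // after: state is reached only through accessors
        options.SetImportEnabled(true);
        options.SetExportEnabled(false);
        return armnn::Optimize(net, backends, runtime.GetDeviceSpec(), options);
    }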
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 691adbf..5e11ab6 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -2181,7 +2181,7 @@
     std::vector<armnn::BackendId> backends = {factory.GetBackendId()};
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::OptimizerOptions optimizerOptions;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
                                                                optimizerOptions);
     CHECK(optimizedNet != nullptr);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 73cef16..bd5466a 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -210,8 +210,8 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
+    OptimizerOptionsOpaque optimizedOptions;
+    optimizedOptions.SetImportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
@@ -278,9 +278,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optimizedOptions;
+    optimizedOptions.SetImportEnabled(true);
+    optimizedOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
@@ -353,9 +353,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optimizedOptions;
+    optimizedOptions.SetImportEnabled(true);
+    optimizedOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
@@ -441,8 +441,8 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
+    OptimizerOptionsOpaque optimizedOptions;
+    optimizedOptions.SetImportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
@@ -531,8 +531,8 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optimizedOptions;
+    optimizedOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
@@ -620,9 +620,9 @@
     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optimizedOptions;
+    optimizedOptions.SetImportEnabled(true);
+    optimizedOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
@@ -714,9 +714,9 @@
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optimizedOptions;
+    optimizedOptions.SetImportEnabled(true);
+    optimizedOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     // Loads it into the runtime.
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 226e2b3..c5f9869 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -157,8 +157,8 @@
     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // optimize the network
-    armnn::OptimizerOptions optOptions;
-    optOptions.m_ProfilingEnabled = true;
+    armnn::OptimizerOptionsOpaque optOptions;
+    optOptions.SetProfilingEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
     if(!optNet)
     {
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 5e619df..ce1eea4 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -93,7 +93,7 @@
 
     try
     {
-        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptionsOpaque(), errMessages);
         FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException&)
@@ -213,7 +213,8 @@
 
     try
     {
-        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+        Optimize(*net, backends, runtime->GetDeviceSpec(),
+                 armnn::OptimizerOptionsOpaque(), errMessages);
         FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException&)
@@ -421,7 +422,8 @@
 
     std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
     armnn::ModelOptions modelOptions;
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
+                                                   false, modelOptions, false);
     std::vector<std::string> errorMessages;
 
     // optimize the network.
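The six-argument constructor above preserves the legacy positional form. Assuming it keeps the old OptimizerOptions parameter order (reduceFp32ToFp16, debug, reduceFp32ToBf16, importEnabled, modelOptions, exportEnabled) — an assumption, since this patch does not show the declaration — an equivalent and more self-documenting configuration uses the named setters introduced by this change:

    armnn::ModelOptions modelOptions;                 // empty here, as in the test above
    armnn::OptimizerOptionsOpaque optimizerOptions;   // boolean options default to off
    optimizerOptions.SetReduceFp32ToFp16(false);
    optimizerOptions.SetDebugEnabled(false);
    optimizerOptions.SetImportEnabled(false);
    optimizerOptions.SetExportEnabled(false);
    for (const auto& option : modelOptions)
    {
        optimizerOptions.AddModelOption(option);
    }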
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 862ed2e..81a6614 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -88,14 +88,14 @@
     armnn::INetworkPtr net2 = CreateNetwork();
 
     // Add specific optimizerOptions to each network.
-    armnn::OptimizerOptions optimizerOptions1;
-    armnn::OptimizerOptions optimizerOptions2;
+    armnn::OptimizerOptionsOpaque optimizerOptions1;
+    armnn::OptimizerOptionsOpaque optimizerOptions2;
     armnn::BackendOptions modelOptions1("GpuAcc",
                                        {{"SaveCachedNetwork", true}, {"CachedNetworkFilePath", filePathString}});
     armnn::BackendOptions modelOptions2("GpuAcc",
                                         {{"SaveCachedNetwork", false}, {"CachedNetworkFilePath", filePathString}});
-    optimizerOptions1.m_ModelOptions.push_back(modelOptions1);
-    optimizerOptions2.m_ModelOptions.push_back(modelOptions2);
+    optimizerOptions1.AddModelOption(modelOptions1);
+    optimizerOptions2.AddModelOption(modelOptions2);
 
     armnn::IOptimizedNetworkPtr optNet1 = armnn::Optimize(
             *net1, backends, runtime->GetDeviceSpec(), optimizerOptions1);
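Backend-specific model options are no longer pushed onto a public m_ModelOptions vector; they are attached through AddModelOption. A short sketch reusing the GpuAcc cached-network options from the test above (filePathString and the surrounding network/runtime setup are assumed to exist as in that test):

    armnn::OptimizerOptionsOpaque optimizerOptions;
    armnn::BackendOptions cacheOptions("GpuAcc",
                                       {{"SaveCachedNetwork", true},
                                        {"CachedNetworkFilePath", filePathString}});
    optimizerOptions.AddModelOption(cacheOptions);
    // The options object is then passed to armnn::Optimize exactly as before.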
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98f..1cc2c4c 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -118,9 +118,9 @@
     IRuntimePtr run = IRuntime::Create(options);
 
     // Optimise ArmNN network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
     CHECK(optNet);
 
@@ -188,8 +188,8 @@
     INetworkPtr myNetwork = CreateTestNetwork(inputTensorInfo);
 
     // Optimise ArmNN network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
     IOptimizedNetworkPtr optNet(nullptr, nullptr);
     std::vector<std::string> errMessages;
 
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 9443116..acba449 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -48,9 +48,9 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -196,7 +196,7 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
+    OptimizerOptionsOpaque optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -329,9 +329,9 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -488,7 +488,7 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
+    OptimizerOptionsOpaque optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 1198cad..39619e6 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -140,9 +140,9 @@
     activation->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // Optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -337,9 +337,9 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     // Optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(false);
+    optOptions.SetExportEnabled(false);
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -473,9 +473,9 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // Optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(false);
+    optOptions.SetExportEnabled(false);
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -621,9 +621,9 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // Optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(false);
+    optOptions.SetExportEnabled(false);
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -760,9 +760,9 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // Optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(false);
+    optOptions.SetExportEnabled(false);
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -912,9 +912,9 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     // Optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(false);
+    optOptions.SetExportEnabled(false);
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -1138,9 +1138,9 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     // Optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(false);
+    optOptions.SetExportEnabled(false);
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759..3d4341d 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -86,8 +86,8 @@
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
 
-    armnn::OptimizerOptions optimizerOptions;
-    optimizerOptions.m_ReduceFp32ToFp16 = true;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
+    optimizerOptions.SetReduceFp32ToFp16(true);
 
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
             *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
@@ -119,9 +119,9 @@
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    armnn::OptimizerOptions optimizerOptions;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
     armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
-    optimizerOptions.m_ModelOptions.push_back(modelOptions);
+    optimizerOptions.AddModelOption(modelOptions);
 
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
     *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 40df2dc..eeb8107 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -58,9 +58,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -202,9 +202,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -338,9 +338,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -483,9 +483,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -748,9 +748,9 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -901,7 +901,7 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
+    OptimizerOptionsOpaque optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -1040,9 +1040,9 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
-    optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
+    OptimizerOptionsOpaque optOptions;
+    optOptions.SetImportEnabled(true);
+    optOptions.SetExportEnabled(true);
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -1204,7 +1204,7 @@
     sub->BackendSelectionHint(backends[1]);
 
     // optimize the network
-    OptimizerOptions optOptions;
+    OptimizerOptionsOpaque optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index dcda9bf..4b700b0 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -71,7 +71,8 @@
 
     try
     {
-        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+        Optimize(*net, backends, runtime->GetDeviceSpec(),
+                 armnn::OptimizerOptionsOpaque(), errMessages);
         FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
@@ -95,9 +96,9 @@
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    armnn::OptimizerOptions optimizerOptions;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
     armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
-    optimizerOptions.m_ModelOptions.push_back(modelOptions);
+    optimizerOptions.AddModelOption(modelOptions);
 
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
     *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
@@ -127,16 +128,16 @@
     unsigned int numberOfThreads = 2;
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    armnn::OptimizerOptions optimizerOptions;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
     armnn::BackendOptions modelOptions("CpuAcc", {{"NumberOfThreads", numberOfThreads}});
-    optimizerOptions.m_ModelOptions.push_back(modelOptions);
+    optimizerOptions.AddModelOption(modelOptions);
 
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
             *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
     CHECK(optimizedNet);
     std::unique_ptr<armnn::Graph> graphPtr;
-    armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
+    armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.GetModelOptions());
 
     auto modelOptionsOut = impl.GetModelOptions();
 
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 7e8064f..b4a135f 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -187,8 +187,8 @@
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
-    armnn::OptimizerOptions optimizerOptions;
-    optimizerOptions.m_Debug = true;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
+    optimizerOptions.SetDebugEnabled(true);
 
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
                                                                optimizerOptions);
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 29ef4c5..ac857a9 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -518,15 +518,15 @@
 {
     armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
 
-    armnn::OptimizerOptions options;
-    options.m_ReduceFp32ToFp16 = m_Params.m_EnableFp16TurboMode;
-    options.m_Debug = m_Params.m_PrintIntermediate;
-    options.m_DebugToFile = m_Params.m_PrintIntermediateOutputsToFile;
-    options.m_shapeInferenceMethod = m_Params.m_InferOutputShape ?
-                                     armnn::ShapeInferenceMethod::InferAndValidate :
-                                     armnn::ShapeInferenceMethod::ValidateOnly;
-    options.m_ProfilingEnabled = m_Params.m_EnableProfiling;
-    options.m_AllowExpandedDims = m_Params.m_AllowExpandedDims;
+    armnn::OptimizerOptionsOpaque options;
+    options.SetReduceFp32ToFp16(m_Params.m_EnableFp16TurboMode);
+    options.SetDebugEnabled(m_Params.m_PrintIntermediate);
+    options.SetDebugToFileEnabled(m_Params.m_PrintIntermediateOutputsToFile);
+    options.SetShapeInferenceMethod(m_Params.m_InferOutputShape ?
+                                    armnn::ShapeInferenceMethod::InferAndValidate :
+                                    armnn::ShapeInferenceMethod::ValidateOnly);
+    options.SetProfilingEnabled(m_Params.m_EnableProfiling);
+    options.SetAllowExpandedDims(m_Params.m_AllowExpandedDims);
 
     armnn::BackendOptions gpuAcc("GpuAcc",
                                  {
@@ -541,8 +541,8 @@
                                          { "FastMathEnabled", m_Params.m_EnableFastMath },
                                          { "NumberOfThreads", m_Params.m_NumberOfThreads }
                                  });
-    options.m_ModelOptions.push_back(gpuAcc);
-    options.m_ModelOptions.push_back(cpuAcc);
+    options.AddModelOption(gpuAcc);
+    options.AddModelOption(cpuAcc);
     // The shapeInferenceMethod and allowExpandedDims values also have to be added to the model options,
     // because the model options are what gets passed to the OptimizeSubgraphViews method and used to
     // create the new optimized INetwork that the method produces.
@@ -550,12 +550,12 @@
                                         {
                                                 { "AllowExpandedDims", m_Params.m_AllowExpandedDims }
                                         });
-    options.m_ModelOptions.push_back(allowExDimOpt);
+    options.AddModelOption(allowExDimOpt);
     armnn::BackendOptions shapeInferOpt("ShapeInferenceMethod",
                                         {
                                                 { "InferAndValidate", m_Params.m_InferOutputShape }
                                         });
-    options.m_ModelOptions.push_back(shapeInferOpt);
+    options.AddModelOption(shapeInferOpt);
 
     const auto optimization_start_time = armnn::GetTimeNow();
     optNet = armnn::Optimize(*network, m_Params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
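As the comment above explains, the shape-inference and expanded-dims choices are registered twice: once on the optimizer options themselves and once as BackendOptions model options, because it is the model options that reach OptimizeSubgraphViews. A condensed sketch of that dual registration, with illustrative flag values standing in for the m_Params fields:

    bool inferOutputShape  = true;    // stand-ins for m_Params.m_InferOutputShape / m_AllowExpandedDims
    bool allowExpandedDims = false;

    armnn::OptimizerOptionsOpaque options;
    options.SetShapeInferenceMethod(inferOutputShape ? armnn::ShapeInferenceMethod::InferAndValidate
                                                     : armnn::ShapeInferenceMethod::ValidateOnly);
    options.SetAllowExpandedDims(allowExpandedDims);

    // Mirror the same choices as model options so backend optimisation sees them too.
    options.AddModelOption(armnn::BackendOptions("ShapeInferenceMethod",
                                                 {{"InferAndValidate", inferOutputShape}}));
    options.AddModelOption(armnn::BackendOptions("AllowExpandedDims",
                                                 {{"AllowExpandedDims", allowExpandedDims}}));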
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index fbfd1bc..3628fa4 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -137,33 +137,33 @@
     }
 
     // Optimizer options next.
-    armnn::OptimizerOptions optimizerOptions;
-    optimizerOptions.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
-    optimizerOptions.m_Debug = m_PrintIntermediate;
-    optimizerOptions.m_DebugToFile = m_PrintIntermediateOutputsToFile;
-    optimizerOptions.m_ProfilingEnabled = m_EnableProfiling;
-    optimizerOptions.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
+    optimizerOptions.SetReduceFp32ToFp16(m_EnableFp16TurboMode);
+    optimizerOptions.SetDebugEnabled(m_PrintIntermediate);
+    optimizerOptions.SetDebugToFileEnabled(m_PrintIntermediateOutputsToFile);
+    optimizerOptions.SetProfilingEnabled(m_EnableProfiling);
+    optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);
     if (m_InferOutputShape)
     {
-        optimizerOptions.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;
+        optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::InferAndValidate);
         armnn::BackendOptions networkOption("ShapeInferenceMethod",
                                             {
                                                 {"InferAndValidate", true}
                                             });
-        optimizerOptions.m_ModelOptions.push_back(networkOption);
+        optimizerOptions.AddModelOption(networkOption);
     }
 
     {
         armnn::BackendOptions option("GpuAcc", {{"FastMathEnabled", m_EnableFastMath}});
-        optimizerOptions.m_ModelOptions.push_back(option);
+        optimizerOptions.AddModelOption(option);
     }
     {
         armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath", m_CachedNetworkFilePath}});
-        optimizerOptions.m_ModelOptions.push_back(option);
+        optimizerOptions.AddModelOption(option);
     }
     {
         armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", m_MLGOTuningFilePath}});
-        optimizerOptions.m_ModelOptions.push_back(option);
+        optimizerOptions.AddModelOption(option);
     }
 
     armnn::BackendOptions cpuAcc("CpuAcc",
@@ -171,14 +171,14 @@
         { "FastMathEnabled", m_EnableFastMath },
         { "NumberOfThreads", m_NumberOfThreads }
                                  });
-    optimizerOptions.m_ModelOptions.push_back(cpuAcc);
+    optimizerOptions.AddModelOption(cpuAcc);
     if (m_AllowExpandedDims)
     {
         armnn::BackendOptions networkOption("AllowExpandedDims",
                                             {
                                                     {"AllowExpandedDims", true}
                                             });
-        optimizerOptions.m_ModelOptions.push_back(networkOption);
+        optimizerOptions.AddModelOption(networkOption);
     }
     delegateOptions.SetOptimizerOptions(optimizerOptions);
     return delegateOptions;
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index fa1b1b0..c053a44 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -455,13 +455,13 @@
 
             ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
 
-            armnn::OptimizerOptions options;
-            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
-            options.m_Debug = params.m_PrintIntermediateLayers;
-            options.m_DebugToFile = params.m_PrintIntermediateLayersToFile;
-            options.m_shapeInferenceMethod = params.m_InferOutputShape ?
-                    armnn::ShapeInferenceMethod::InferAndValidate : armnn::ShapeInferenceMethod::ValidateOnly;
-            options.m_ProfilingEnabled = m_EnableProfiling;
+            armnn::OptimizerOptionsOpaque options;
+            options.SetReduceFp32ToFp16(params.m_EnableFp16TurboMode);
+            options.SetDebugEnabled(params.m_PrintIntermediateLayers);
+            options.SetDebugToFileEnabled(params.m_PrintIntermediateLayersToFile);
+            options.SetShapeInferenceMethod(params.m_InferOutputShape ?
+                    armnn::ShapeInferenceMethod::InferAndValidate : armnn::ShapeInferenceMethod::ValidateOnly);
+            options.SetProfilingEnabled(m_EnableProfiling);
 
             armnn::BackendOptions gpuAcc("GpuAcc",
             {
@@ -476,8 +476,8 @@
                 { "FastMathEnabled", params.m_EnableFastMath },
                 { "NumberOfThreads", params.m_NumberOfThreads }
             });
-            options.m_ModelOptions.push_back(gpuAcc);
-            options.m_ModelOptions.push_back(cpuAcc);
+            options.AddModelOption(gpuAcc);
+            options.AddModelOption(cpuAcc);
 
             const auto optimization_start_time = armnn::GetTimeNow();
             optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
diff --git a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
index 75bc9a3..3ecd160 100644
--- a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
+++ b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
@@ -128,8 +128,8 @@
     ARMNN_LOG(debug) << "Model loaded ok: " << filename;
 
     // Optimize backbone model
-    OptimizerOptions options;
-    options.m_ImportEnabled = enableImport != ImportMemory::False;
+    OptimizerOptionsOpaque options;
+    options.SetImportEnabled(enableImport != ImportMemory::False);
     auto optimizedModel = Optimize(*model, backendPreferences, runtime.GetDeviceSpec(), options);
     if (!optimizedModel)
     {
@@ -149,7 +149,7 @@
     {
         std::string errorMessage;
 
-        armnn::MemorySource memSource = options.m_ImportEnabled ? armnn::MemorySource::Malloc
+        armnn::MemorySource memSource = options.GetImportEnabled() ? armnn::MemorySource::Malloc
                                                                 : armnn::MemorySource::Undefined;
         INetworkProperties modelProps(false, memSource, memSource);
         Status status = runtime.LoadNetwork(networkId, std::move(optimizedModel), errorMessage, modelProps);