Revert "IVGCVSW-6873 Import inputs but don't export outputs fails."

This reverts commit 03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721.

Reason for revert: Caused failures in tests located in an internal repo.
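
Reverting restores the single-flag behaviour shown in the diff below:
OptimizerOptions loses its m_ExportEnabled member, the TfLite delegate goes
back to using one MemorySource for both inputs and outputs, and the
LoadedNetwork check that validated memory sources against the optimizer
settings is removed. As a rough sketch of the restored five-argument
constructor (argument values here are illustrative only):

    // Illustrative values; parameter names match the restored signature.
    armnn::ModelOptions modelOptions;
    armnn::OptimizerOptions optimizerOptions(/*reduceFp32ToFp16=*/false,
                                             /*debug=*/false,
                                             /*reduceFp32ToBf16=*/false,
                                             /*importEnabled=*/false,
                                             modelOptions);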

Change-Id: If35cb0ede349b270e4e7827324382e09455d8cfa
diff --git a/delegate/include/Version.hpp b/delegate/include/Version.hpp
index c14857e..34555b2 100644
--- a/delegate/include/Version.hpp
+++ b/delegate/include/Version.hpp
@@ -14,7 +14,7 @@
 
 // ArmNN Delegate version components
 #define DELEGATE_MAJOR_VERSION 26
-#define DELEGATE_MINOR_VERSION 1
+#define DELEGATE_MINOR_VERSION 0
 #define DELEGATE_PATCH_VERSION 0
 
 /// DELEGATE_VERSION: "X.Y.Z"
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 1b6d68e..bb2f3c3 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -394,20 +394,14 @@
         // Load graph into runtime
         std::string errorMessage;
         armnn::Status loadingStatus;
-        armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
-        armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
-        // There's a bit of an assumption here that the delegate will only support Malloc memory source.
+        armnn::MemorySource memorySource = armnn::MemorySource::Undefined;
         if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
         {
-            inputSource = armnn::MemorySource::Malloc;
-        }
-        if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
-        {
-            outputSource = armnn::MemorySource::Malloc;
+            memorySource = armnn::MemorySource::Malloc;
         }
         armnn::INetworkProperties networkProperties(false,
-                                                    inputSource,
-                                                    outputSource,
+                                                    memorySource,
+                                                    memorySource,
                                                     delegate->m_Options.GetInternalProfilingState(),
                                                     delegate->m_Options.GetInternalProfilingDetail());
         loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
diff --git a/delegate/src/test/DelegateOptionsTest.cpp b/delegate/src/test/DelegateOptionsTest.cpp
index c9f1530..126bf30 100644
--- a/delegate/src/test/DelegateOptionsTest.cpp
+++ b/delegate/src/test/DelegateOptionsTest.cpp
@@ -173,7 +173,7 @@
                                  });
     modelOptions.push_back(cpuAcc);
 
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 475367e..89b4776 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -144,11 +144,10 @@
         , m_ImportEnabled(false)
         , m_ModelOptions()
         , m_ProfilingEnabled(false)
-        , m_ExportEnabled(false)
     {}
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
-                     ModelOptions modelOptions = {}, bool exportEnabled = false)
+                     ModelOptions modelOptions = {})
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -156,7 +155,6 @@
         , m_ImportEnabled(importEnabled)
         , m_ModelOptions(modelOptions)
         , m_ProfilingEnabled(false)
-        , m_ExportEnabled(exportEnabled)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -166,7 +164,7 @@
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
-                     bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false)
+                     bool importEnabled = false, ModelOptions modelOptions = {})
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -174,7 +172,6 @@
         , m_ImportEnabled(importEnabled)
         , m_ModelOptions(modelOptions)
         , m_ProfilingEnabled(false)
-        , m_ExportEnabled(exportEnabled)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -192,7 +189,6 @@
         stream << "\tShapeInferenceMethod: " <<
         (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
         stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
-        stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
         stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
 
         stream << "\tModelOptions: \n";
@@ -238,9 +234,6 @@
 
     // Enable profiling dump of the optimizer phase
     bool m_ProfilingEnabled;
-
-    // Enable Export
-    bool m_ExportEnabled;
 };
 
 class IWorkloadFactory;
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index 7951eac..d41c4ec 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -10,7 +10,7 @@
 #define STRINGIFY_MACRO(s) #s
 
 // ArmNN version components
-#define ARMNN_MAJOR_VERSION 30
+#define ARMNN_MAJOR_VERSION 29
 #define ARMNN_MINOR_VERSION 0
 #define ARMNN_PATCH_VERSION 0
 
diff --git a/include/armnnOnnxParser/Version.hpp b/include/armnnOnnxParser/Version.hpp
index 33a2846..ed9d869 100644
--- a/include/armnnOnnxParser/Version.hpp
+++ b/include/armnnOnnxParser/Version.hpp
@@ -14,7 +14,7 @@
 
 // OnnxParser version components
 #define ONNX_PARSER_MAJOR_VERSION 24
-#define ONNX_PARSER_MINOR_VERSION 5
+#define ONNX_PARSER_MINOR_VERSION 4
 #define ONNX_PARSER_PATCH_VERSION 0
 
 /// ONNX_PARSER_VERSION: "X.Y.Z"
diff --git a/include/armnnTfLiteParser/Version.hpp b/include/armnnTfLiteParser/Version.hpp
index 5db527e..eee2124 100644
--- a/include/armnnTfLiteParser/Version.hpp
+++ b/include/armnnTfLiteParser/Version.hpp
@@ -14,7 +14,7 @@
 
 // TfLiteParser version components
 #define TFLITE_PARSER_MAJOR_VERSION 24
-#define TFLITE_PARSER_MINOR_VERSION 5
+#define TFLITE_PARSER_MINOR_VERSION 4
 #define TFLITE_PARSER_PATCH_VERSION 0
 
 /// TFLITE_PARSER_VERSION: "X.Y.Z"
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index 6d8b42d..7dc8d86 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -91,14 +91,14 @@
 ```bash
 $ python setup.py sdist
 ```
-As the result you will get `./dist/pyarmnn-30.0.0.tar.gz` file. As you can see it is platform independent.
+As the result you will get `./dist/pyarmnn-29.0.0.tar.gz` file. As you can see it is platform independent.
 
 ##### 5. Build the binary package
 
 ```bash
 $ python setup.py bdist_wheel
 ```
-As the result you will get something like `./dist/pyarmnn-30.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
+As the result you will get something like `./dist/pyarmnn-29.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
  is platform dependent.
 
 # PyArmNN installation
@@ -107,8 +107,8 @@
 
 Binary package is platform dependent, the name of the package will indicate the platform it was built for, e.g.:
 
-* Linux x86 64bit machine: pyarmnn-30.0.0-cp36-cp36m-*linux_x86_64*.whl
-* Linux Aarch 64 bit machine: pyarmnn-30.0.0-cp36-cp36m-*linux_aarch64*.whl
+* Linux x86 64bit machine: pyarmnn-29.0.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-29.0.0-cp36-cp36m-*linux_aarch64*.whl
 
 The source package is platform independent but installation involves compilation of Arm NN python extension. You will need to have g++ compatible with C++ 14 standard and a python development library installed on the build machine.
 
@@ -126,7 +126,7 @@
 ```
 Install PyArmNN from binary by pointing to the wheel file:
 ```bash
-$ pip install /path/to/pyarmnn-30.0.0-cp36-cp36m-linux_aarch64.whl
+$ pip install /path/to/pyarmnn-29.0.0-cp36-cp36m-linux_aarch64.whl
 ```
 
 ## Installing from source package
@@ -143,7 +143,7 @@
 
 Install PyArmNN as follows:
 ```bash
-$ pip install /path/to/pyarmnn-30.0.0.tar.gz
+$ pip install /path/to/pyarmnn-29.0.0.tar.gz
 ```
 
 If PyArmNN installation script fails to find Arm NN libraries it will raise an error like this
@@ -157,7 +157,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
 ```
 
 # PyArmNN API overview
diff --git a/python/pyarmnn/examples/image_classification/README.md b/python/pyarmnn/examples/image_classification/README.md
index a360f01..7275a25 100644
--- a/python/pyarmnn/examples/image_classification/README.md
+++ b/python/pyarmnn/examples/image_classification/README.md
@@ -20,7 +20,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
 ```
 
 ##### Dependencies
diff --git a/python/pyarmnn/examples/object_detection/README.md b/python/pyarmnn/examples/object_detection/README.md
index 215cf77..7a946ad 100644
--- a/python/pyarmnn/examples/object_detection/README.md
+++ b/python/pyarmnn/examples/object_detection/README.md
@@ -54,7 +54,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
 ```
 
 ##### Dependencies
diff --git a/python/pyarmnn/examples/speech_recognition/README.md b/python/pyarmnn/examples/speech_recognition/README.md
index d5fee8a..854cdaf 100644
--- a/python/pyarmnn/examples/speech_recognition/README.md
+++ b/python/pyarmnn/examples/speech_recognition/README.md
@@ -18,7 +18,7 @@
 
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
 ```
 
 ### Dependencies
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index d1b1ca2..7c0940e 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MIT
 import os
 
-version_info = (30, 0, 0)
+version_info = (29, 0, 0)
 
 __dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
 
@@ -24,7 +24,7 @@
     """Compares expected Arm NN version and Arm NN version used to build the package.
 
     Args:
-        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 30.0.0)
+        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 29.0.0)
         expected_armnn_version (str): Expected Arm NN version
 
     Returns:
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index 55b6795..a2f57a3 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -29,7 +29,7 @@
                                that can not be reduced will be left in Fp32.
     m_ReduceFp32ToFp16 (bool): Reduces Fp32 network to Fp16 for faster processing. Layers
                                that can not be reduced will be left in Fp32.
-    m_ImportEnabled (bool):    Enable memory import of inport tensors.
+    m_ImportEnabled (bool):    Enable memory import.
     m_shapeInferenceMethod:    The ShapeInferenceMethod modifies how the output shapes are treated.
                                When ValidateOnly is selected, the output shapes are inferred from the input parameters
                                of the layer and any mismatch is reported.
@@ -38,7 +38,6 @@
                                with tensors which rank or dimension sizes are not specified explicitly, however this
                                information can be calculated from the inputs.
     m_ModelOptions:            List of backends optimisation options.
-    m_ExportEnabled (bool):    Enable memory export of output tensors.
 
 ") OptimizerOptions;
 
@@ -52,8 +51,7 @@
                      bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                      bool importEnabled = false,
-                     std::vector<armnn::BackendOptions> modelOptions = {},
-                     bool exportEnabled = false);
+                     std::vector<armnn::BackendOptions> modelOptions = {});
 
     bool m_ReduceFp32ToBf16;
     bool m_ReduceFp32ToFp16;
@@ -61,7 +59,6 @@
     ShapeInferenceMethod m_shapeInferenceMethod;
     bool m_ImportEnabled;
     std::vector<armnn::BackendOptions> m_ModelOptions;
-    bool m_ExportEnabled;
 };
 %model_options_clear;
 
diff --git a/python/pyarmnn/test/test_modeloption.py b/python/pyarmnn/test/test_modeloption.py
index a47d2da..c03d4a8 100644
--- a/python/pyarmnn/test/test_modeloption.py
+++ b/python/pyarmnn/test/test_modeloption.py
@@ -71,8 +71,7 @@
                           False,
                           ShapeInferenceMethod_InferAndValidate,
                           True,
-                          [a],
-                          True)
+                          [a])
 
     mo = oo.m_ModelOptions
 
@@ -113,8 +112,7 @@
                          False,
                          ShapeInferenceMethod_InferAndValidate,
                          True,
-                         a,
-                         True)
+                         a)
 
     assert "Wrong number or type of arguments" in str(err.value)
 
@@ -124,8 +122,7 @@
                          True,
                          ShapeInferenceMethod_InferAndValidate,
                          True,
-                         [a],
-                         True)
+                         [a])
 
     assert "BFloat16 and Float16 optimization cannot be enabled at the same time" in str(err.value)
 
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
index a6c4e1d..a37772c 100644
--- a/python/pyarmnn/test/test_runtime.py
+++ b/python/pyarmnn/test/test_runtime.py
@@ -156,8 +156,8 @@
     opt_network, _ = ann.Optimize(network, preferred_backends,
                                   runtime.GetDeviceSpec(), ann.OptimizerOptions())
 
-    inputSource = ann.MemorySource_Undefined
-    outputSource = ann.MemorySource_Undefined
+    inputSource = ann.MemorySource_Malloc
+    outputSource = ann.MemorySource_Malloc
     properties = ann.INetworkProperties(False, inputSource, outputSource)
     net_id, messages = runtime.LoadNetwork(opt_network, properties)
     assert "" == messages
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
index 27feda2..4a6f930 100644
--- a/python/pyarmnn/test/test_setup.py
+++ b/python/pyarmnn/test/test_setup.py
@@ -87,15 +87,15 @@
 
 
 def test_armnn_version():
-    check_armnn_version('30.0.0', '30.0.0')
+    check_armnn_version('29.0.0', '29.0.0')
 
 
 def test_incorrect_armnn_version():
     with pytest.raises(AssertionError) as err:
-        check_armnn_version('30.0.0', '30.1.0')
+        check_armnn_version('29.0.0', '29.1.0')
 
-    assert 'Expected ArmNN version is 30.1.0 but installed ArmNN version is 30.0.0' in str(err.value)
+    assert 'Expected ArmNN version is 29.1.0 but installed ArmNN version is 29.0.0' in str(err.value)
 
 
 def test_armnn_version_patch_does_not_matter():
-    check_armnn_version('30.0.0', '30.0.1')
+    check_armnn_version('29.0.0', '29.0.1')
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
index 83606ab..f74ae02 100644
--- a/python/pyarmnn/test/test_version.py
+++ b/python/pyarmnn/test/test_version.py
@@ -18,7 +18,7 @@
 
     importlib.reload(v)
 
-    assert "30.0.0.dev1" == v.__version__
+    assert "29.0.0.dev1" == v.__version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
@@ -30,7 +30,7 @@
 
     importlib.reload(v)
 
-    assert "30.0.0" == v.__arm_ml_version__
+    assert "29.0.0" == v.__arm_ml_version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
diff --git a/samples/ObjectDetection/Readme.md b/samples/ObjectDetection/Readme.md
index bd84e26..194a3e9 100644
--- a/samples/ObjectDetection/Readme.md
+++ b/samples/ObjectDetection/Readme.md
@@ -253,8 +253,8 @@
 The full list of libs after cross-compilation to copy on your board:
 ```
 libarmnn.so
-libarmnn.so.30
-libarmnn.so.30.0
+libarmnn.so.29
+libarmnn.so.29.0
 For Arm NN public C++ API mode:
 libarmnnTfLiteParser.so
 libarmnnTfLiteParser.so.24.4
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index a27add9..ec79d5d 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -84,87 +84,6 @@
 
 } // anonymous
 
-/**
- * This function performs a sanity check to ensure that the combination of input and output memory source matches the
- * values for importEnabled and exportEnabled that were specified during optimization. During optimization the tensor
- * handle factories are chosen based on whether import and export are enabled. If the user then specifies something
- * incompatible here it can lead to problems.
- *
- * @param optimizedOptions
- * @param networkProperties
- */
-void ValidateSourcesMatchOptimizedNetwork(std::vector<BackendOptions> optimizedOptions,
-                                          const INetworkProperties& networkProperties)
-{
-    // Find the "Global" backend options. During the optimize phase the values of importEnabled and exportEnabled are
-    // added as backend options.
-    const vector<BackendOptions>::iterator& backendItr =
-        find_if(optimizedOptions.begin(), optimizedOptions.end(), [](const BackendOptions& backend) {
-            if (backend.GetBackendId().Get() == "Global")
-            {
-                return true;
-            }
-            else
-            {
-                return false;
-            }
-        });
-    bool importEnabled = false;
-    bool exportEnabled = false;
-    if (backendItr != optimizedOptions.end())
-    {
-        // Find the importEnabled and exportEnabled values.
-        for (size_t i = 0; i < backendItr->GetOptionCount(); i++)
-        {
-            const BackendOptions::BackendOption& option = backendItr->GetOption(i);
-            if (option.GetName() == "ImportEnabled")
-            {
-                importEnabled = option.GetValue().AsBool();
-            }
-            if (option.GetName() == "ExportEnabled")
-            {
-                exportEnabled = option.GetValue().AsBool();
-            }
-        }
-    }
-
-    // Now that we have values for import and export compare them to the MemorySource variables.
-    // Any value of MemorySource that's not "Undefined" implies that we need to do an import of some kind.
-    if ((networkProperties.m_InputSource == MemorySource::Undefined && importEnabled) ||
-        (networkProperties.m_InputSource != MemorySource::Undefined && !importEnabled))
-    {
-        auto message = fmt::format("The input memory source specified, '{0}',", networkProperties.m_InputSource);
-        if (!importEnabled)
-        {
-            message.append(" requires that memory import be enabled. However, "
-                           "it was disabled when this network was optimized.");
-        }
-        else
-        {
-            message.append(" requires that memory import be disabled. However, "
-                           "it was enabled when this network was optimized.");
-        }
-        throw InvalidArgumentException(message);
-    }
-
-    if ((networkProperties.m_OutputSource == MemorySource::Undefined && exportEnabled) ||
-        (networkProperties.m_OutputSource != MemorySource::Undefined && !exportEnabled))
-    {
-        auto message = fmt::format("The output memory source specified, '{0}',", networkProperties.m_OutputSource);
-        if (!exportEnabled)
-        {
-            message.append(" requires that memory export be enabled. However, "
-                           "it was disabled when this network was optimized.");
-        }
-        else
-        {
-            message.append(" requires that memory export be disabled. However, "
-                           "it was enabled when this network was optimized.");
-        }
-        throw InvalidArgumentException(message);
-    }
-} // anonymous
-
 std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                                                                 std::string& errorMessage,
                                                                 const INetworkProperties& networkProperties,
@@ -217,11 +136,6 @@
 
     profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
 
-    // We need to check that the memory sources match up with the values of import and export specified during the
-    // optimize phase. If they don't this will throw an exception.
-    ValidateSourcesMatchOptimizedNetwork(m_OptimizedNetwork.get()->pOptimizedNetworkImpl->GetModelOptions(),
-                                         m_NetworkProperties);
-
     //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
     //Because workload creation can modify some of the handlers,
@@ -1525,7 +1439,7 @@
 
             ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-            if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
+            if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_InputSource))
             {
                 throw MemoryImportException(
                     fmt::format("ImportInputs: Memory Import failed, backend: "
@@ -1537,7 +1451,7 @@
                     std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
                                                                    inputTensor.second.GetMemoryArea());
 
-            if (tensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource))
+            if (tensorHandle->Import(passThroughTensorHandle->Map(), m_NetworkProperties.m_InputSource))
             {
                 importedInputs.push_back(m_CurImportedInputId++);
                 passThroughTensorHandle->Unmap();
@@ -1650,14 +1564,14 @@
 
         ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-        if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
+        if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_OutputSource))
         {
             throw MemoryImportException(fmt::format("ImportInputs: Memory Import failed, backend: "
                                                     "{} does not support importing from source {}"
-                                                    , factoryId, forceImportMemorySource));
+                                                    , factoryId, m_NetworkProperties.m_OutputSource));
         }
 
-        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
+        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), m_NetworkProperties.m_OutputSource))
         {
             importedOutputs.push_back(m_CurImportedOutputId++);
         }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9520c13..f2ba94f 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1362,7 +1362,7 @@
 ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                     OutputSlot& outputSlot,
                                                     TensorHandleFactoryRegistry& registry,
-                                                    bool exportEnabled)
+                                                    bool importEnabled)
 {
     // First ensure the from backends can support the TensorHandeAPI
     Layer& layer = outputSlot.GetOwningLayer();
@@ -1390,7 +1390,7 @@
     std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
     for (auto&& pref : srcPrefs)
     {
-        if (exportEnabled)
+        if (importEnabled)
         {
             ITensorHandleFactory* factory = registry.GetFactory(pref);
             if (outputConnection)
@@ -1602,13 +1602,12 @@
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
-                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages)
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
     OptimizationResult result;
 
-    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
+    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
     {
         ARMNN_ASSERT(layer);
 
@@ -1633,7 +1632,7 @@
                     slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                     break;
                 default:
-                    slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
+                    slotOption = CalculateSlotOption(backends, outputSlot, registry, importEnabled);
                     break;
             }
             outputSlot.SetTensorHandleFactory(slotOption);
@@ -1697,15 +1696,7 @@
 
     std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
 
-    // We need to pass on the information about whether import and export is enabled to the LoadNetwork phase.
-    // The mechanism to do that is to add model options to the optimized network.
-    armnn::BackendOptions importExport("Global",
-                                        {{"ImportEnabled", options.m_ImportEnabled},
-                                         {"ExportEnabled", options.m_ExportEnabled}});
-    ModelOptions optimizedOptions(options.m_ModelOptions);
-    optimizedOptions.push_back(importExport);
-
-    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
+    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
                                        &IOptimizedNetwork::Destroy);
 
     IOptimizedNetwork* optNetObjPtr = optNet.get();
@@ -1828,9 +1819,7 @@
                                                                    backends,
                                                                    tensorHandleFactoryRegistry,
                                                                    options.m_ImportEnabled,
-                                                                   options.m_ExportEnabled,
                                                                    messages);
-
     if (strategyResult.m_Error)
     {
         // Failed to apply the backend-specific optimizations
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2d34cfc..6c7c2f5 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -300,7 +300,6 @@
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
-                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages);
 
 OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index f5dfadf..376cdbc 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -56,9 +56,9 @@
     armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
 
     std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
-                                              MemorySource forceImportMemorySource);
+                                              MemorySource forceImportMemorySource = MemorySource::Undefined);
     std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
-                                                MemorySource forceImportMemorySource);
+                                                MemorySource forceImportMemorySource = MemorySource::Undefined);
 
     void ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds);
     void ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds);
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 59f6554..3cbe884 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -93,7 +93,7 @@
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     std::string er;
-    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -107,7 +107,7 @@
     ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
     Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());
 
-    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}}, MemorySource::Malloc);
+    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}});
     CHECK(importedInputVec1.size() == 1);
     CHECK(importedInputVec1[0] == 0);
 
@@ -118,7 +118,7 @@
         CHECK(val == 30);
     }
 
-    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
+    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
     CHECK(importedInputVec2.size() == 1);
     CHECK(importedInputVec2[0] == 1);
 
@@ -146,7 +146,7 @@
     // Incorrect layer binding id and ImportedInputId
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {10});,
                     armnn::InvalidArgumentException);
-    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
+    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
     CHECK(importedInputVec3[0] == 2);
     // Too many ImportedInputIds
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1, 2});,
@@ -175,7 +175,6 @@
     // Trying to delete unknown pre-imported tensor
     CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {10});, armnn::InvalidArgumentException);
 }
-
 TEST_CASE("RuntimePreImportOutputs")
 {
     armnn::IRuntime::CreationOptions options;
@@ -217,7 +216,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
 
     std::string er;
-    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Malloc);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -258,7 +257,7 @@
     runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
     testOutputs();
 
-    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 }, MemorySource::Malloc);
+    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 });
     CHECK(importedOutputVec.size() == 2);
     CHECK(importedOutputVec[0] == 0);
     CHECK(importedOutputVec[1] == 1);
@@ -272,7 +271,7 @@
     runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
     testOutputs();
 
-    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
+    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors);
     CHECK(importedInputVec.size() == 2);
     CHECK(importedInputVec[0] == 0);
     CHECK(importedInputVec[1] == 1);
@@ -1294,176 +1293,4 @@
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
 }
 
-TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
-{
-    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
-    // that network but specify that the import memory source is Malloc.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = false;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Malloc, MemorySource::Undefined);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was disabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
-{
-    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
-    // that network but specify that the export memory source as Malloc.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = false;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Malloc);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was disabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
-{
-    // In this test case we'll optimize a network with import enabled. Then we'll attempt to load
-    // that network but specify that the import memory source is Undefined.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = false;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was enabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
-{
-    // In this test case we'll optimize a network with export enabled. Then we'll attempt to load
-    // that network but specify that the export memory source is Undefined.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was enabled when this network was optimized") != -1);
-}
-
 }
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 2ea3c2a..c591fff 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -342,7 +342,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 9c85ffc..c69a4b5 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -73,7 +73,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index cc5aa23..77901df 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,9 +204,7 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -271,10 +269,7 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -345,10 +340,7 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -432,9 +424,7 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -524,9 +514,7 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -613,10 +601,7 @@
     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -709,10 +694,7 @@
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index cd865de..bcea061 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -421,7 +421,7 @@
 
     std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
     armnn::ModelOptions modelOptions;
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
     std::vector<std::string> errorMessages;
 
     // optimize the network.
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98f..139e688 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -120,7 +120,6 @@
     // Optimise ArmNN network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
     CHECK(optNet);
 
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 51a983a..6ac9433 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -50,7 +50,6 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -331,7 +330,6 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 9a075d2..20537b3 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -142,7 +142,6 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -339,7 +338,6 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -472,7 +470,6 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -616,7 +613,6 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -751,7 +747,6 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -901,7 +896,6 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -1123,7 +1117,6 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759..cf17eae 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -130,7 +130,7 @@
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+    CHECK(modelOptionsOut.size() == 1);
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 8e0e0ab..d2de843 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -60,7 +60,6 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -204,7 +203,6 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -340,7 +338,6 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -485,7 +482,6 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -750,7 +746,6 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -1042,7 +1037,6 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index dcda9bf..9b448b2 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -106,7 +106,7 @@
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+    CHECK(modelOptionsOut.size() == 1);
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }