IVGCVSW-6873 Importing inputs but not exporting outputs fails.

Only one bool is used to indicate whether inputs should be imported.
However, it's possible for the user to want to import inputs but not
export outputs. In addition, it's possible for a user to enable import
during optimization but then pass a memory source that does not require
import.

* Add m_ExportEnabled to INetwork.hpp.
* Modify Network::Optimize to consider both m_ImportEnabled
  and m_ExportEnabled.
* Add ValidateSourcesMatchOptimizedNetwork to LoadedNetwork to validate
  import options between optimize and network load.
* Update the TfLite delegate to consider the exportEnabled flag in the
  optimizer (a usage sketch follows this list).
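
A minimal sketch of the intended usage (identifiers such as network,
backends, runtime and networkId are assumed to exist; based on the tests
added in this patch):

    armnn::OptimizerOptions optOptions;
    optOptions.m_ImportEnabled = true;   // inputs will be imported
    optOptions.m_ExportEnabled = false;  // outputs will be copied out
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);

    // The memory sources passed at load time must match the flags above:
    // Malloc for inputs (import on), Undefined for outputs (export off).
    armnn::INetworkProperties properties(false,
                                         armnn::MemorySource::Malloc,
                                         armnn::MemorySource::Undefined);
    std::string errorMessage;
    runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, properties);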

!armnn-internal-tests:425350
Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: I776eab81595898e43f91ab40306962eae61329f4
diff --git a/delegate/include/Version.hpp b/delegate/include/Version.hpp
index 34555b2..c14857e 100644
--- a/delegate/include/Version.hpp
+++ b/delegate/include/Version.hpp
@@ -14,7 +14,7 @@
 
 // ArmNN Delegate version components
 #define DELEGATE_MAJOR_VERSION 26
-#define DELEGATE_MINOR_VERSION 0
+#define DELEGATE_MINOR_VERSION 1
 #define DELEGATE_PATCH_VERSION 0
 
 /// DELEGATE_VERSION: "X.Y.Z"
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index bb2f3c3..1b6d68e 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -394,14 +394,20 @@
         // Load graph into runtime
         std::string errorMessage;
         armnn::Status loadingStatus;
-        armnn::MemorySource memorySource = armnn::MemorySource::Undefined;
+        armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
+        armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
+        // Note: this assumes that Malloc is the only memory source the delegate supports.
         if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
         {
-            memorySource = armnn::MemorySource::Malloc;
+            inputSource = armnn::MemorySource::Malloc;
+        }
+        if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
+        {
+            outputSource = armnn::MemorySource::Malloc;
         }
         armnn::INetworkProperties networkProperties(false,
-                                                    memorySource,
-                                                    memorySource,
+                                                    inputSource,
+                                                    outputSource,
                                                     delegate->m_Options.GetInternalProfilingState(),
                                                     delegate->m_Options.GetInternalProfilingDetail());
         loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
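
The mapping applied above can be read as a small helper (a sketch, not part
of the patch; the delegate is assumed to support only the Malloc source):

    armnn::MemorySource ToMemorySource(bool enabled)
    {
        return enabled ? armnn::MemorySource::Malloc
                       : armnn::MemorySource::Undefined;
    }
    // inputSource  = ToMemorySource(options.m_ImportEnabled);
    // outputSource = ToMemorySource(options.m_ExportEnabled);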
diff --git a/delegate/src/test/DelegateOptionsTest.cpp b/delegate/src/test/DelegateOptionsTest.cpp
index 126bf30..c9f1530 100644
--- a/delegate/src/test/DelegateOptionsTest.cpp
+++ b/delegate/src/test/DelegateOptionsTest.cpp
@@ -173,7 +173,7 @@
                                  });
     modelOptions.push_back(cpuAcc);
 
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 89b4776..475367e 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -144,10 +144,11 @@
         , m_ImportEnabled(false)
         , m_ModelOptions()
         , m_ProfilingEnabled(false)
+        , m_ExportEnabled(false)
     {}
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
-                     ModelOptions modelOptions = {})
+                     ModelOptions modelOptions = {}, bool exportEnabled = false)
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -155,6 +156,7 @@
         , m_ImportEnabled(importEnabled)
         , m_ModelOptions(modelOptions)
         , m_ProfilingEnabled(false)
+        , m_ExportEnabled(exportEnabled)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -164,7 +166,7 @@
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
-                     bool importEnabled = false, ModelOptions modelOptions = {})
+                     bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false)
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -172,6 +174,7 @@
         , m_ImportEnabled(importEnabled)
         , m_ModelOptions(modelOptions)
         , m_ProfilingEnabled(false)
+        , m_ExportEnabled(exportEnabled)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -189,6 +192,7 @@
         stream << "\tShapeInferenceMethod: " <<
         (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
         stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
+        stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
         stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
 
         stream << "\tModelOptions: \n";
@@ -234,6 +238,9 @@
 
     // Enable profiling dump of the optimizer phase
     bool m_ProfilingEnabled;
+
+    // Enable memory export of output tensors
+    bool m_ExportEnabled;
 };
 
 class IWorkloadFactory;
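
For positional callers, exportEnabled is the new trailing parameter of both
constructors and defaults to false, so existing call sites stay source
compatible. A sketch:

    armnn::OptimizerOptions optimizerOptions(false,  // reduceFp32ToFp16
                                             false,  // debug
                                             false,  // reduceFp32ToBf16
                                             true,   // importEnabled
                                             {},     // modelOptions
                                             true);  // exportEnabled (new)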
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index d41c4ec..7951eac 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -10,7 +10,7 @@
 #define STRINGIFY_MACRO(s) #s
 
 // ArmNN version components
-#define ARMNN_MAJOR_VERSION 29
+#define ARMNN_MAJOR_VERSION 30
 #define ARMNN_MINOR_VERSION 0
 #define ARMNN_PATCH_VERSION 0
 
diff --git a/include/armnnOnnxParser/Version.hpp b/include/armnnOnnxParser/Version.hpp
index ed9d869..33a2846 100644
--- a/include/armnnOnnxParser/Version.hpp
+++ b/include/armnnOnnxParser/Version.hpp
@@ -14,7 +14,7 @@
 
 // OnnxParser version components
 #define ONNX_PARSER_MAJOR_VERSION 24
-#define ONNX_PARSER_MINOR_VERSION 4
+#define ONNX_PARSER_MINOR_VERSION 5
 #define ONNX_PARSER_PATCH_VERSION 0
 
 /// ONNX_PARSER_VERSION: "X.Y.Z"
diff --git a/include/armnnTfLiteParser/Version.hpp b/include/armnnTfLiteParser/Version.hpp
index eee2124..5db527e 100644
--- a/include/armnnTfLiteParser/Version.hpp
+++ b/include/armnnTfLiteParser/Version.hpp
@@ -14,7 +14,7 @@
 
 // TfLiteParser version components
 #define TFLITE_PARSER_MAJOR_VERSION 24
-#define TFLITE_PARSER_MINOR_VERSION 4
+#define TFLITE_PARSER_MINOR_VERSION 5
 #define TFLITE_PARSER_PATCH_VERSION 0
 
 /// TFLITE_PARSER_VERSION: "X.Y.Z"
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index 7dc8d86..6d8b42d 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -91,14 +91,14 @@
 ```bash
 $ python setup.py sdist
 ```
-As the result you will get `./dist/pyarmnn-29.0.0.tar.gz` file. As you can see it is platform independent.
+As a result you will get a `./dist/pyarmnn-30.0.0.tar.gz` file. As you can see, it is platform independent.
 
 ##### 5. Build the binary package
 
 ```bash
 $ python setup.py bdist_wheel
 ```
-As the result you will get something like `./dist/pyarmnn-29.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
+As a result you will get something like a `./dist/pyarmnn-30.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
  is platform dependent.
 
 # PyArmNN installation
@@ -107,8 +107,8 @@
 
 Binary package is platform dependent, the name of the package will indicate the platform it was built for, e.g.:
 
-* Linux x86 64bit machine: pyarmnn-29.0.0-cp36-cp36m-*linux_x86_64*.whl
-* Linux Aarch 64 bit machine: pyarmnn-29.0.0-cp36-cp36m-*linux_aarch64*.whl
+* Linux x86 64bit machine: pyarmnn-30.0.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-30.0.0-cp36-cp36m-*linux_aarch64*.whl
 
 The source package is platform independent but installation involves compilation of Arm NN python extension. You will need to have g++ compatible with C++ 14 standard and a python development library installed on the build machine.
 
@@ -126,7 +126,7 @@
 ```
 Install PyArmNN from binary by pointing to the wheel file:
 ```bash
-$ pip install /path/to/pyarmnn-29.0.0-cp36-cp36m-linux_aarch64.whl
+$ pip install /path/to/pyarmnn-30.0.0-cp36-cp36m-linux_aarch64.whl
 ```
 
 ## Installing from source package
@@ -143,7 +143,7 @@
 
 Install PyArmNN as follows:
 ```bash
-$ pip install /path/to/pyarmnn-29.0.0.tar.gz
+$ pip install /path/to/pyarmnn-30.0.0.tar.gz
 ```
 
 If PyArmNN installation script fails to find Arm NN libraries it will raise an error like this
@@ -157,7 +157,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'29.0.0'
+'30.0.0'
 ```
 
 # PyArmNN API overview
diff --git a/python/pyarmnn/examples/image_classification/README.md b/python/pyarmnn/examples/image_classification/README.md
index 7275a25..a360f01 100644
--- a/python/pyarmnn/examples/image_classification/README.md
+++ b/python/pyarmnn/examples/image_classification/README.md
@@ -20,7 +20,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'29.0.0'
+'30.0.0'
 ```
 
 ##### Dependencies
diff --git a/python/pyarmnn/examples/object_detection/README.md b/python/pyarmnn/examples/object_detection/README.md
index 7a946ad..215cf77 100644
--- a/python/pyarmnn/examples/object_detection/README.md
+++ b/python/pyarmnn/examples/object_detection/README.md
@@ -54,7 +54,7 @@
 You can also verify it by running the following and getting output similar to below:
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'29.0.0'
+'30.0.0'
 ```
 
 ##### Dependencies
diff --git a/python/pyarmnn/examples/speech_recognition/README.md b/python/pyarmnn/examples/speech_recognition/README.md
index 854cdaf..d5fee8a 100644
--- a/python/pyarmnn/examples/speech_recognition/README.md
+++ b/python/pyarmnn/examples/speech_recognition/README.md
@@ -18,7 +18,7 @@
 
 ```bash
 $ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'29.0.0'
+'30.0.0'
 ```
 
 ### Dependencies
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index 7c0940e..d1b1ca2 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MIT
 import os
 
-version_info = (29, 0, 0)
+version_info = (30, 0, 0)
 
 __dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
 
@@ -24,7 +24,7 @@
     """Compares expected Arm NN version and Arm NN version used to build the package.
 
     Args:
-        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 29.0.0)
+        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 30.0.0)
         expected_armnn_version (str): Expected Arm NN version
 
     Returns:
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index a2f57a3..55b6795 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -29,7 +29,7 @@
                                that can not be reduced will be left in Fp32.
     m_ReduceFp32ToFp16 (bool): Reduces Fp32 network to Fp16 for faster processing. Layers
                                that can not be reduced will be left in Fp32.
-    m_ImportEnabled (bool):    Enable memory import.
+    m_ImportEnabled (bool):    Enable memory import of input tensors.
     m_shapeInferenceMethod:    The ShapeInferenceMethod modifies how the output shapes are treated.
                                When ValidateOnly is selected, the output shapes are inferred from the input parameters
                                of the layer and any mismatch is reported.
@@ -38,6 +38,7 @@
                                with tensors which rank or dimension sizes are not specified explicitly, however this
                                information can be calculated from the inputs.
     m_ModelOptions:            List of backends optimisation options.
+    m_ExportEnabled (bool):    Enable memory export of output tensors.
 
 ") OptimizerOptions;
 
@@ -51,7 +52,8 @@
                      bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                      bool importEnabled = false,
-                     std::vector<armnn::BackendOptions> modelOptions = {});
+                     std::vector<armnn::BackendOptions> modelOptions = {},
+                     bool exportEnabled = false);
 
     bool m_ReduceFp32ToBf16;
     bool m_ReduceFp32ToFp16;
@@ -59,6 +61,7 @@
     ShapeInferenceMethod m_shapeInferenceMethod;
     bool m_ImportEnabled;
     std::vector<armnn::BackendOptions> m_ModelOptions;
+    bool m_ExportEnabled;
 };
 %model_options_clear;
 
diff --git a/python/pyarmnn/test/test_modeloption.py b/python/pyarmnn/test/test_modeloption.py
index c03d4a8..a47d2da 100644
--- a/python/pyarmnn/test/test_modeloption.py
+++ b/python/pyarmnn/test/test_modeloption.py
@@ -71,7 +71,8 @@
                           False,
                           ShapeInferenceMethod_InferAndValidate,
                           True,
-                          [a])
+                          [a],
+                          True)
 
     mo = oo.m_ModelOptions
 
@@ -112,7 +113,8 @@
                          False,
                          ShapeInferenceMethod_InferAndValidate,
                          True,
-                         a)
+                         a,
+                         True)
 
     assert "Wrong number or type of arguments" in str(err.value)
 
@@ -122,7 +124,8 @@
                          True,
                          ShapeInferenceMethod_InferAndValidate,
                          True,
-                         [a])
+                         [a],
+                         True)
 
     assert "BFloat16 and Float16 optimization cannot be enabled at the same time" in str(err.value)
 
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
index a37772c..a6c4e1d 100644
--- a/python/pyarmnn/test/test_runtime.py
+++ b/python/pyarmnn/test/test_runtime.py
@@ -156,8 +156,8 @@
     opt_network, _ = ann.Optimize(network, preferred_backends,
                                   runtime.GetDeviceSpec(), ann.OptimizerOptions())
 
-    inputSource = ann.MemorySource_Malloc
-    outputSource = ann.MemorySource_Malloc
+    inputSource = ann.MemorySource_Undefined
+    outputSource = ann.MemorySource_Undefined
     properties = ann.INetworkProperties(False, inputSource, outputSource)
     net_id, messages = runtime.LoadNetwork(opt_network, properties)
     assert "" == messages
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
index 4a6f930..27feda2 100644
--- a/python/pyarmnn/test/test_setup.py
+++ b/python/pyarmnn/test/test_setup.py
@@ -87,15 +87,15 @@
 
 
 def test_armnn_version():
-    check_armnn_version('29.0.0', '29.0.0')
+    check_armnn_version('30.0.0', '30.0.0')
 
 
 def test_incorrect_armnn_version():
     with pytest.raises(AssertionError) as err:
-        check_armnn_version('29.0.0', '29.1.0')
+        check_armnn_version('30.0.0', '30.1.0')
 
-    assert 'Expected ArmNN version is 29.1.0 but installed ArmNN version is 29.0.0' in str(err.value)
+    assert 'Expected ArmNN version is 30.1.0 but installed ArmNN version is 30.0.0' in str(err.value)
 
 
 def test_armnn_version_patch_does_not_matter():
-    check_armnn_version('29.0.0', '29.0.1')
+    check_armnn_version('30.0.0', '30.0.1')
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
index f74ae02..83606ab 100644
--- a/python/pyarmnn/test/test_version.py
+++ b/python/pyarmnn/test/test_version.py
@@ -18,7 +18,7 @@
 
     importlib.reload(v)
 
-    assert "29.0.0.dev1" == v.__version__
+    assert "30.0.0.dev1" == v.__version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
@@ -30,7 +30,7 @@
 
     importlib.reload(v)
 
-    assert "29.0.0" == v.__arm_ml_version__
+    assert "30.0.0" == v.__arm_ml_version__
 
     del os.environ["PYARMNN_DEV_VER"]
     del v
diff --git a/samples/ObjectDetection/Readme.md b/samples/ObjectDetection/Readme.md
index 194a3e9..bd84e26 100644
--- a/samples/ObjectDetection/Readme.md
+++ b/samples/ObjectDetection/Readme.md
@@ -253,8 +253,8 @@
 The full list of libs after cross-compilation to copy on your board:
 ```
 libarmnn.so
-libarmnn.so.29
-libarmnn.so.29.0
+libarmnn.so.30
+libarmnn.so.30.0
 For Arm NN public C++ API mode:
 libarmnnTfLiteParser.so
 libarmnnTfLiteParser.so.24.4
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index ec79d5d..a27add9 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -84,6 +84,87 @@
 
 } // anonymous
 
+/**
+ * This function performs a sanity check to ensure that the combination of input and output memory source matches the
+ * values for importEnabled and exportEnabled that were specified during optimization. During optimization the tensor
+ * handle factories are chosen based on whether import and export are enabled. If the user then specifies an
+ * incompatible combination here, it can lead to failures that are hard to diagnose.
+ *
+ * @param optimizedOptions The model options of the optimized network, which carry the "Global" backend options.
+ * @param networkProperties The network properties supplied at load time, including the memory sources.
+ */
+void ValidateSourcesMatchOptimizedNetwork(std::vector<BackendOptions> optimizedOptions,
+                                          const INetworkProperties& networkProperties)
+{
+    // Find the "Global" backend options. During the optimize phase the values of importEnabled and exportEnabled are
+    // added as backend options.
+    const vector<BackendOptions>::iterator& backendItr =
+        find_if(optimizedOptions.begin(), optimizedOptions.end(), [](const BackendOptions& backend) {
+            if (backend.GetBackendId().Get() == "Global")
+            {
+                return true;
+            }
+            else
+            {
+                return false;
+            }
+        });
+    bool importEnabled = false;
+    bool exportEnabled = false;
+    if (backendItr != optimizedOptions.end())
+    {
+        // Find the importEnabled and exportEnabled values.
+        for (size_t i = 0; i < backendItr->GetOptionCount(); i++)
+        {
+            const BackendOptions::BackendOption& option = backendItr->GetOption(i);
+            if (option.GetName() == "ImportEnabled")
+            {
+                importEnabled = option.GetValue().AsBool();
+            }
+            if (option.GetName() == "ExportEnabled")
+            {
+                exportEnabled = option.GetValue().AsBool();
+            }
+        }
+    }
+
+    // Now that we have values for import and export compare them to the MemorySource variables.
+    // Any value of MemorySource that's not "Undefined" implies that we need to do an import of some kind.
+    if ((networkProperties.m_InputSource == MemorySource::Undefined && importEnabled) ||
+        (networkProperties.m_InputSource != MemorySource::Undefined && !importEnabled))
+    {
+        auto message = fmt::format("The input memory source specified, '{0}',", networkProperties.m_InputSource);
+        if (!importEnabled)
+        {
+            message.append(" requires that memory import be enabled. However, "
+                           "it was disabled when this network was optimized.");
+        }
+        else
+        {
+            message.append(" requires that memory import be disabled. However, "
+                           "it was enabled when this network was optimized.");
+        }
+        throw InvalidArgumentException(message);
+    }
+
+    if ((networkProperties.m_OutputSource == MemorySource::Undefined && exportEnabled) ||
+        (networkProperties.m_OutputSource != MemorySource::Undefined && !exportEnabled))
+    {
+        auto message = fmt::format("The output memory source specified, '{0}',", networkProperties.m_OutputSource);
+        if (!exportEnabled)
+        {
+            message.append(" requires that memory export be enabled. However, "
+                           "it was disabled when this network was optimized.");
+        }
+        else
+        {
+            message.append(" requires that memory export be disabled. However, "
+                           "it was enabled when this network was optimized.");
+        }
+        throw InvalidArgumentException(message);
+    }
+}
+
 std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                                                                 std::string& errorMessage,
                                                                 const INetworkProperties& networkProperties,
@@ -136,6 +217,11 @@
 
     profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
 
+    // We need to check that the memory sources match up with the values of import and export specified during the
+    // optimize phase. If they don't, this will throw an exception.
+    ValidateSourcesMatchOptimizedNetwork(m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions(),
+                                         m_NetworkProperties);
+
     //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
     //Because workload creation can modify some of the handlers,
@@ -1439,7 +1525,7 @@
 
             ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-            if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_InputSource))
+            if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
             {
                 throw MemoryImportException(
                     fmt::format("ImportInputs: Memory Import failed, backend: "
@@ -1451,7 +1537,7 @@
                     std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
                                                                    inputTensor.second.GetMemoryArea());
 
-            if (tensorHandle->Import(passThroughTensorHandle->Map(), m_NetworkProperties.m_InputSource))
+            if (tensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource))
             {
                 importedInputs.push_back(m_CurImportedInputId++);
                 passThroughTensorHandle->Unmap();
@@ -1564,14 +1650,14 @@
 
         ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-        if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_OutputSource))
+        if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
         {
             throw MemoryImportException(fmt::format("ImportInputs: Memory Import failed, backend: "
                                                     "{} does not support importing from source {}"
-                                                    , factoryId, m_NetworkProperties.m_OutputSource));
+                                                    , factoryId, forceImportMemorySource));
         }
 
-        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), m_NetworkProperties.m_OutputSource))
+        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
         {
             importedOutputs.push_back(m_CurImportedOutputId++);
         }
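
The rule enforced by ValidateSourcesMatchOptimizedNetwork reduces to one
invariant per direction: the memory source is Undefined exactly when the
corresponding flag was off at optimize time. A sketch of that predicate:

    // Holds for input/import and for output/export alike.
    bool SourceMatchesFlag(armnn::MemorySource source, bool enabledAtOptimizeTime)
    {
        return (source != armnn::MemorySource::Undefined) == enabledAtOptimizeTime;
    }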
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f2ba94f..9520c13 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1362,7 +1362,7 @@
 ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                     OutputSlot& outputSlot,
                                                     TensorHandleFactoryRegistry& registry,
-                                                    bool importEnabled)
+                                                    bool exportEnabled)
 {
     // First ensure the from backends can support the TensorHandeAPI
     Layer& layer = outputSlot.GetOwningLayer();
@@ -1390,7 +1390,7 @@
     std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
     for (auto&& pref : srcPrefs)
     {
-        if (importEnabled)
+        if (exportEnabled)
         {
             ITensorHandleFactory* factory = registry.GetFactory(pref);
             if (outputConnection)
@@ -1602,12 +1602,13 @@
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
+                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages)
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
     OptimizationResult result;
 
-    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
+    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
     {
         ARMNN_ASSERT(layer);
 
@@ -1632,7 +1633,7 @@
                     slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                     break;
                 default:
-                    slotOption = CalculateSlotOption(backends, outputSlot, registry, importEnabled);
+                    slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
                     break;
             }
             outputSlot.SetTensorHandleFactory(slotOption);
@@ -1696,7 +1697,15 @@
 
     std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
 
-    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
+    // We need to pass on the information about whether import and export are enabled to the LoadNetwork phase.
+    // The mechanism to do that is to add model options to the optimized network.
+    armnn::BackendOptions importExport("Global",
+                                        {{"ImportEnabled", options.m_ImportEnabled},
+                                         {"ExportEnabled", options.m_ExportEnabled}});
+    ModelOptions optimizedOptions(options.m_ModelOptions);
+    optimizedOptions.push_back(importExport);
+
+    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
                                        &IOptimizedNetwork::Destroy);
 
     IOptimizedNetwork* optNetObjPtr = optNet.get();
@@ -1819,7 +1828,9 @@
                                                                    backends,
                                                                    tensorHandleFactoryRegistry,
                                                                    options.m_ImportEnabled,
+                                                                   options.m_ExportEnabled,
                                                                    messages);
+
     if (strategyResult.m_Error)
     {
         // Failed to apply the backend-specific optimizations
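
On the load side these flags are recovered by scanning the model options for
the "Global" backend id, as ValidateSourcesMatchOptimizedNetwork does above.
A condensed sketch of that lookup (the helper name is illustrative):

    bool GetGlobalFlag(const armnn::ModelOptions& options, const std::string& name)
    {
        for (const armnn::BackendOptions& backend : options)
        {
            if (backend.GetBackendId().Get() != "Global") { continue; }
            for (size_t i = 0; i < backend.GetOptionCount(); ++i)
            {
                if (backend.GetOption(i).GetName() == name)
                {
                    return backend.GetOption(i).GetValue().AsBool();
                }
            }
        }
        return false; // absent: network was optimized before these options existed
    }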
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 6c7c2f5..2d34cfc 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -300,6 +300,7 @@
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
+                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages);
 
 OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index 376cdbc..f5dfadf 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -56,9 +56,9 @@
     armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
 
     std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
-                                              MemorySource forceImportMemorySource = MemorySource::Undefined);
+                                              MemorySource forceImportMemorySource);
     std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
-                                                MemorySource forceImportMemorySource = MemorySource::Undefined);
+                                                MemorySource forceImportMemorySource);
 
     void ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds);
     void ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds);
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 3cbe884..59f6554 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -93,7 +93,7 @@
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     std::string er;
-    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -107,7 +107,7 @@
     ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
     Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());
 
-    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}});
+    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}}, MemorySource::Malloc);
     CHECK(importedInputVec1.size() == 1);
     CHECK(importedInputVec1[0] == 0);
 
@@ -118,7 +118,7 @@
         CHECK(val == 30);
     }
 
-    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
+    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
     CHECK(importedInputVec2.size() == 1);
     CHECK(importedInputVec2[0] == 1);
 
@@ -146,7 +146,7 @@
     // Incorrect layer binding id and ImportedInputId
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {10});,
                     armnn::InvalidArgumentException);
-    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
+    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
     CHECK(importedInputVec3[0] == 2);
     // Too many ImportedInputIds
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1, 2});,
@@ -175,6 +175,7 @@
     // Trying to delete unknown pre-imported tensor
     CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {10});, armnn::InvalidArgumentException);
 }
+
 TEST_CASE("RuntimePreImportOutputs")
 {
     armnn::IRuntime::CreationOptions options;
@@ -216,7 +217,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
 
     std::string er;
-    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Malloc);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -257,7 +258,7 @@
     runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
     testOutputs();
 
-    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 });
+    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 }, MemorySource::Malloc);
     CHECK(importedOutputVec.size() == 2);
     CHECK(importedOutputVec[0] == 0);
     CHECK(importedOutputVec[1] == 1);
@@ -271,7 +272,7 @@
     runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
     testOutputs();
 
-    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors);
+    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
     CHECK(importedInputVec.size() == 2);
     CHECK(importedInputVec[0] == 0);
     CHECK(importedInputVec[1] == 1);
@@ -1293,4 +1294,176 @@
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
 }
 
+TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
+{
+    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
+    // that network but specify that the import memory source is Malloc.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import and export to off.
+    optimizedOptions.m_ImportEnabled = false;
+    optimizedOptions.m_ExportEnabled = false;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network passing an import memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Malloc, MemorySource::Undefined);
+    // There should be an InvalidArgumentException.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was disabled when this network was optimized") != -1);
+}
+
+TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
+{
+    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
+    // that network but specify the export memory source as Malloc.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import and export to off.
+    optimizedOptions.m_ImportEnabled = false;
+    optimizedOptions.m_ExportEnabled = false;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network passing an export memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Malloc);
+    // There should be an InvalidArgumentException.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was disabled when this network was optimized") != -1);
+}
+
+TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
+{
+    // In this test case we'll optimize a network with import enabled. Then we'll attempt to load
+    // that network but specify that the import memory source is Undefined.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import on and export off.
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = false;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network without specifying any memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
+    // There should be an InvalidArgumentException.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was enabled when this network was optimized") != -1);
+}
+
+TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
+{
+    // In this test case we'll optimize a network with export enabled. Then we'll attempt to load
+    // that network but specify that the export memory source is Undefined.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import off and export on.
+    optimizedOptions.m_ImportEnabled = false;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network without specifying any memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
+    // There should be an InvalidArgumentException.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was enabled when this network was optimized") != -1);
+}
+
 }
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index c591fff..2ea3c2a 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -342,7 +342,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index c69a4b5..9c85ffc 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -73,7 +73,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 77901df..cc5aa23 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,7 +204,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -269,7 +271,10 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -340,7 +345,10 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -424,7 +432,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -514,7 +524,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -601,7 +613,10 @@
     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -694,7 +709,10 @@
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     // Loads it into the runtime.
     NetworkId netId;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index bcea061..cd865de 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -421,7 +421,7 @@
 
     std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
     armnn::ModelOptions modelOptions;
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
     std::vector<std::string> errorMessages;
 
     // optimize the network.
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 139e688..251c98f 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -120,6 +120,7 @@
     // Optimise ArmNN network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
     CHECK(optNet);
 
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 6ac9433..51a983a 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -50,6 +50,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -330,6 +331,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 20537b3..9a075d2 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -142,6 +142,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -338,6 +339,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -470,6 +472,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -613,6 +616,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -747,6 +751,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -896,6 +901,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -1117,6 +1123,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index cf17eae..6648759 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -130,7 +130,7 @@
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled plus the "Global" options holding the import/export values.
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index d2de843..8e0e0ab 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -60,6 +60,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -203,6 +204,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -338,6 +340,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -482,6 +485,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -746,6 +750,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -1037,6 +1042,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 9b448b2..dcda9bf 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -106,7 +106,7 @@
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled plus the "Global" options holding the import/export values.
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }