IVGCVSW-6873 Import inputs but don't export outputs fails.

Only a single bool is used to indicate whether inputs should be imported.
However, it's possible for the user to want to import inputs but not
export outputs. In addition, it's possible for a user to enable import
during optimize but then pass a memory source that does not require
import.
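
For example, with this change a user can import inputs while leaving
outputs un-exported. A minimal sketch (assuming a built INetwork 'net',
a backend list and an IRuntime 'runtime', as in the tests below):

    OptimizerOptions optOptions;
    optOptions.m_ImportEnabled = true;   // import input tensors
    optOptions.m_ExportEnabled = false;  // outputs are copied as before
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

    // The memory sources passed at load time must match the flags used above.
    INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
    std::string errorMessage;
    runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);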

* Add m_ExportEnabled to INetwork.hpp.
* Modify Network::Optimize to consider both m_ImportEnabled
  and m_ExportEnabled.
* Add ValidateSourcesMatchOptimizedNetwork to LoadedNetwork to validate
  the memory sources passed at load time against the import and export
  options used during optimize (see the sketch after this list).
* Update the TfLite delegate to consider the exportEnabled flag in the
  optimizer.
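
A mismatch between the two phases is now rejected at load time. A sketch
of the failure path, based on the RuntimeOptimizeImportOff_LoadNetworkImportOn
test case added below:

    OptimizerOptions optOptions;  // m_ImportEnabled and m_ExportEnabled left at their default of false
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

    // Requesting an import source the network was not optimized for now fails.
    INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
    std::string errorMessage;
    runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
    // errorMessage now contains: "... requires that memory import be enabled.
    // However, it was disabled when this network was optimized."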

!armnn-internal-tests:425350
Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: I776eab81595898e43f91ab40306962eae61329f4
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index ec79d5d..a27add9 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -84,6 +84,87 @@
 
 } // anonymous
 
+/**
+ * This function performs a sanity check to ensure that the combination of input and output memory source matches the
+ * values for importEnabled and exportEnabled that were specified during optimization. During optimization the tensor
+ * handle factories are chosen based on whether import and export are enabled. If the user then specifies something
+ * incompatible here it can lead to problems.
+ *
+ * @param optimizedOptions The model options of the optimized network, including the "Global" options that hold the
+ *                         importEnabled and exportEnabled values recorded during optimization.
+ * @param networkProperties The properties supplied at load time, carrying the input and output memory sources.
+ */
+void ValidateSourcesMatchOptimizedNetwork(const std::vector<BackendOptions>& optimizedOptions,
+                                          const INetworkProperties& networkProperties)
+{
+    // Find the "Global" backend options. During the optimize phase the values of importEnabled and exportEnabled are
+    // added as backend options.
+    const auto backendItr =
+        std::find_if(optimizedOptions.begin(), optimizedOptions.end(), [](const BackendOptions& backend) {
+            return backend.GetBackendId().Get() == "Global";
+        });
+    bool importEnabled = false;
+    bool exportEnabled = false;
+    if (backendItr != optimizedOptions.end())
+    {
+        // Find the importEnabled and exportEnabled values.
+        for (size_t i = 0; i < backendItr->GetOptionCount(); i++)
+        {
+            const BackendOptions::BackendOption& option = backendItr->GetOption(i);
+            if (option.GetName() == "ImportEnabled")
+            {
+                importEnabled = option.GetValue().AsBool();
+            }
+            if (option.GetName() == "ExportEnabled")
+            {
+                exportEnabled = option.GetValue().AsBool();
+            }
+        }
+    }
+
+    // Now that we have values for import and export compare them to the MemorySource variables.
+    // Any value of MemorySource that's not "Undefined" implies that we need to do an import of some kind.
+    if ((networkProperties.m_InputSource == MemorySource::Undefined && importEnabled) ||
+        (networkProperties.m_InputSource != MemorySource::Undefined && !importEnabled))
+    {
+        auto message = fmt::format("The input memory source specified, '{0}',", networkProperties.m_InputSource);
+        if (!importEnabled)
+        {
+            message.append(" requires that memory import be enabled. However, "
+                           "it was disabled when this network was optimized.");
+        }
+        else
+        {
+            message.append(" requires that memory import be disabled. However, "
+                           "it was enabled when this network was optimized.");
+        }
+        throw InvalidArgumentException(message);
+    }
+
+    if ((networkProperties.m_OutputSource == MemorySource::Undefined && exportEnabled) ||
+        (networkProperties.m_OutputSource != MemorySource::Undefined && !exportEnabled))
+    {
+        auto message = fmt::format("The output memory source specified, '{0}',", networkProperties.m_OutputSource);
+        if (!exportEnabled)
+        {
+            message.append(" requires that memory export be enabled. However, "
+                           "it was disabled when this network was optimized.");
+        }
+        else
+        {
+            message.append(" requires that memory export be disabled. However, "
+                           "it was enabled when this network was optimized.");
+        }
+        throw InvalidArgumentException(message);
+    }
+}
+
 std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                                                                 std::string& errorMessage,
                                                                 const INetworkProperties& networkProperties,
@@ -136,6 +217,11 @@
 
     profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
 
+    // We need to check that the memory sources match up with the values of import and export specified during the
+    // optimize phase. If they don't match, this will throw an exception.
+    ValidateSourcesMatchOptimizedNetwork(m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions(),
+                                         m_NetworkProperties);
+
     //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
     //Because workload creation can modify some of the handlers,
@@ -1439,7 +1525,7 @@
 
             ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-            if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_InputSource))
+            if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
             {
                 throw MemoryImportException(
                     fmt::format("ImportInputs: Memory Import failed, backend: "
@@ -1451,7 +1537,7 @@
                     std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
                                                                    inputTensor.second.GetMemoryArea());
 
-            if (tensorHandle->Import(passThroughTensorHandle->Map(), m_NetworkProperties.m_InputSource))
+            if (tensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource))
             {
                 importedInputs.push_back(m_CurImportedInputId++);
                 passThroughTensorHandle->Unmap();
@@ -1564,14 +1650,14 @@
 
         ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-        if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_OutputSource))
+        if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
         {
             throw MemoryImportException(fmt::format("ImportInputs: Memory Import failed, backend: "
                                                     "{} does not support importing from source {}"
-                                                    , factoryId, m_NetworkProperties.m_OutputSource));
+                                                    , factoryId, forceImportMemorySource));
         }
 
-        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), m_NetworkProperties.m_OutputSource))
+        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
         {
             importedOutputs.push_back(m_CurImportedOutputId++);
         }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f2ba94f..9520c13 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1362,7 +1362,7 @@
 ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                     OutputSlot& outputSlot,
                                                     TensorHandleFactoryRegistry& registry,
-                                                    bool importEnabled)
+                                                    bool exportEnabled)
 {
     // First ensure the from backends can support the TensorHandeAPI
     Layer& layer = outputSlot.GetOwningLayer();
@@ -1390,7 +1390,7 @@
     std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
     for (auto&& pref : srcPrefs)
     {
-        if (importEnabled)
+        if (exportEnabled)
         {
             ITensorHandleFactory* factory = registry.GetFactory(pref);
             if (outputConnection)
@@ -1602,12 +1602,13 @@
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
+                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages)
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
     OptimizationResult result;
 
-    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
+    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
     {
         ARMNN_ASSERT(layer);
 
@@ -1632,7 +1633,7 @@
                     slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                     break;
                 default:
-                    slotOption = CalculateSlotOption(backends, outputSlot, registry, importEnabled);
+                    slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
                     break;
             }
             outputSlot.SetTensorHandleFactory(slotOption);
@@ -1696,7 +1697,15 @@
 
     std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
 
-    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
+    // We need to pass on the information about whether import and export are enabled to the LoadNetwork phase.
+    // The mechanism for doing that is to add model options to the optimized network.
+    armnn::BackendOptions importExport("Global",
+                                        {{"ImportEnabled", options.m_ImportEnabled},
+                                         {"ExportEnabled", options.m_ExportEnabled}});
+    ModelOptions optimizedOptions(options.m_ModelOptions);
+    optimizedOptions.push_back(importExport);
+
+    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
                                        &IOptimizedNetwork::Destroy);
 
     IOptimizedNetwork* optNetObjPtr = optNet.get();
@@ -1819,7 +1828,9 @@
                                                                    backends,
                                                                    tensorHandleFactoryRegistry,
                                                                    options.m_ImportEnabled,
+                                                                   options.m_ExportEnabled,
                                                                    messages);
+
     if (strategyResult.m_Error)
     {
         // Failed to apply the backend-specific optimizations
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 6c7c2f5..2d34cfc 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -300,6 +300,7 @@
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
+                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages);
 
 OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index 376cdbc..f5dfadf 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -56,9 +56,9 @@
     armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
 
     std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
-                                              MemorySource forceImportMemorySource = MemorySource::Undefined);
+                                              MemorySource forceImportMemorySource);
     std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
-                                                MemorySource forceImportMemorySource = MemorySource::Undefined);
+                                                MemorySource forceImportMemorySource);
 
     void ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds);
     void ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds);
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 3cbe884..59f6554 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -93,7 +93,7 @@
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     std::string er;
-    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -107,7 +107,7 @@
     ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
     Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());
 
-    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}});
+    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}}, MemorySource::Malloc);
     CHECK(importedInputVec1.size() == 1);
     CHECK(importedInputVec1[0] == 0);
 
@@ -118,7 +118,7 @@
         CHECK(val == 30);
     }
 
-    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
+    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
     CHECK(importedInputVec2.size() == 1);
     CHECK(importedInputVec2[0] == 1);
 
@@ -146,7 +146,7 @@
     // Incorrect layer binding id and ImportedInputId
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {10});,
                     armnn::InvalidArgumentException);
-    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
+    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
     CHECK(importedInputVec3[0] == 2);
     // Too many ImportedInputIds
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1, 2});,
@@ -175,6 +175,7 @@
     // Trying to delete unknown pre-imported tensor
     CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {10});, armnn::InvalidArgumentException);
 }
+
 TEST_CASE("RuntimePreImportOutputs")
 {
     armnn::IRuntime::CreationOptions options;
@@ -216,7 +217,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
 
     std::string er;
-    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Malloc);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -257,7 +258,7 @@
     runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
     testOutputs();
 
-    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 });
+    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 }, MemorySource::Malloc);
     CHECK(importedOutputVec.size() == 2);
     CHECK(importedOutputVec[0] == 0);
     CHECK(importedOutputVec[1] == 1);
@@ -271,7 +272,7 @@
     runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
     testOutputs();
 
-    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors);
+    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
     CHECK(importedInputVec.size() == 2);
     CHECK(importedInputVec[0] == 0);
     CHECK(importedInputVec[1] == 1);
@@ -1293,4 +1294,176 @@
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
 }
 
+TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
+{
+    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
+    // that network but specify that the import memory source is Malloc.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import and export to off.
+    optimizedOptions.m_ImportEnabled = false;
+    optimizedOptions.m_ExportEnabled = false;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network passing an import memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Malloc, MemorySource::Undefined);
+    // LoadNetwork should fail with an InvalidArgumentException, reported through the error string.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was disabled when this network was optimized") != std::string::npos);
+}
+
+TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
+{
+    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
+    // that network but specify the export memory source as Malloc.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import and export to off.
+    optimizedOptions.m_ImportEnabled = false;
+    optimizedOptions.m_ExportEnabled = false;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network passing an export memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Malloc);
+    // LoadNetwork should fail with an InvalidArgumentException, reported through the error string.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was disabled when this network was optimized") != std::string::npos);
+}
+
+TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
+{
+    // In this test case we'll optimize a network with import enabled. Then we'll attempt to load
+    // that network but specify that the import memory source is Undefined.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import on and export off.
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = false;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network without passing an import memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
+    // LoadNetwork should fail with an InvalidArgumentException, reported through the error string.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was enabled when this network was optimized") != std::string::npos);
+}
+
+TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
+{
+    // In this test case we'll optimize a network with export enabled. Then we'll attempt to load
+    // that network but specify that the export memory source is Undefined.
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+    armnn::NetworkId networkId = 1;
+    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+    auto addLayer = testNetwork->AddAdditionLayer("add layer");
+    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+    OptimizerOptions optimizedOptions;
+    // Hard set import off and export on.
+    optimizedOptions.m_ImportEnabled = false;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    std::string er;
+    // Load the network without passing an export memory source.
+    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
+    // LoadNetwork should fail with an InvalidArgumentException, reported through the error string.
+    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+    CHECK(er.find("However, it was enabled when this network was optimized") != std::string::npos);
+}
+
 }
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index c591fff..2ea3c2a 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -342,7 +342,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index c69a4b5..9c85ffc 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -73,7 +73,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 77901df..cc5aa23 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,7 +204,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -269,7 +271,10 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -340,7 +345,10 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -424,7 +432,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -514,7 +524,9 @@
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -601,7 +613,10 @@
     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -694,7 +709,10 @@
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     // Loads it into the runtime.
     NetworkId netId;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index bcea061..cd865de 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -421,7 +421,7 @@
 
     std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
     armnn::ModelOptions modelOptions;
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
     std::vector<std::string> errorMessages;
 
     // optimize the network.
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 139e688..251c98f 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -120,6 +120,7 @@
     // Optimise ArmNN network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
     CHECK(optNet);
 
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 6ac9433..51a983a 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -50,6 +50,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -330,6 +331,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 20537b3..9a075d2 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -142,6 +142,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -338,6 +339,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -470,6 +472,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -613,6 +616,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -747,6 +751,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -896,6 +901,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -1117,6 +1123,7 @@
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
+    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index cf17eae..6648759 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -130,7 +130,7 @@
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled plus the "Global" options holding the import/export values.
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index d2de843..8e0e0ab 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -60,6 +60,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -203,6 +204,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -338,6 +340,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -482,6 +485,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -746,6 +750,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -1037,6 +1042,7 @@
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
+    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 9b448b2..dcda9bf 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -106,7 +106,7 @@
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled plus the "Global" options holding the import/export values.
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }