IVGCVSW-5012 Add importEnabled option for OptimizerOptions

 * Default importEnabled to false
 * Improve error messages

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I17f78986aa1d23e48b0844297a52029b1a9bbe3e
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 94a9961..dec9468 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -861,7 +861,8 @@
                                    ITensorHandleFactory::FactoryId srcFactoryId,
                                    const Layer& layer,
                                    const Layer& connectedLayer,
-                                   TensorHandleFactoryRegistry& registry)
+                                   TensorHandleFactoryRegistry& registry,
+                                   bool importEnabled)
 {
     auto toBackend = backends.find(connectedLayer.GetBackendId());
     ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
@@ -899,7 +900,7 @@
 
     // Search for export/import options
     ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
-    if (srcFactory->GetExportFlags() != 0)
+    if (srcFactory->GetExportFlags() != 0 && importEnabled)
     {
         for (auto&& pref : dstPrefs)
         {
@@ -945,11 +946,12 @@
 OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
+                                              bool importEnabled,
                                               Optional<std::vector<std::string>&> errMessages)
 {
     OptimizationResult result;
 
-    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages](Layer* layer)
+    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
     {
         ARMNN_ASSERT(layer);
 
@@ -985,7 +987,8 @@
             {
                 const Layer& connectedLayer = connection->GetOwningLayer();
 
-                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer, registry);
+                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
+                                                              registry, importEnabled);
 
                 if (strategy == EdgeStrategy::Undefined)
                 {
@@ -1122,6 +1125,7 @@
     OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                    backends,
                                                                    tensorHandleFactoryRegistry,
+                                                                   options.m_ImportEnabled,
                                                                    messages);
     if (strategyResult.m_Error)
     {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 77d6b04..7136ee4 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -323,6 +323,7 @@
 OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
+                                              bool importEnabled,
                                               Optional<std::vector<std::string>&> errMessages);
 
 OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 976e58e..c7aa30f 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -339,7 +339,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     BOOST_TEST(result.m_Error == false);
     BOOST_TEST(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 599c984..90aa76e 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -64,7 +64,7 @@
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     BOOST_TEST(result.m_Error == false);
     BOOST_TEST(result.m_Warning == false);
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index 4cc610c..be5bd45 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -159,6 +159,14 @@
                     return m_Imported;
                 }
             }
+            else
+            {
+                throw MemoryImportException("NeonTensorHandle::Import is disabled");
+            }
+        }
+        else
+        {
+            throw MemoryImportException("NeonTensorHandle::Incorrect import flag");
         }
         return false;
     }
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index cf4d91b..9a07ed2 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -60,7 +60,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -196,7 +198,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -325,7 +329,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -461,7 +467,9 @@
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -544,4 +552,136 @@
     BOOST_TEST(outputData == expectedOutput);
 }
 
+BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
+{
+    using namespace armnn;
+
+    // Create a mock backend object
+    MockImportBackendInitialiser initialiser; // Register the Mock Backend
+    auto backendObjPtr = CreateBackendObject(MockImportBackendId());
+    BOOST_TEST((backendObjPtr != nullptr));
+
+    BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
+    if (backendIds.find("MockRef") == backendIds.end())
+    {
+        std::string message = "Cannot load MockRef";
+        BOOST_FAIL(message);
+    }
+
+    // Create runtime in which test will run and allow fallback to CpuAcc.
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
+    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
+    IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
+    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddAdditionLayer("add");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
+    input2->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    sub->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
+
+    input0->GetOutputSlot(0).SetTensorInfo(info);
+    input1->GetOutputSlot(0).SetTensorInfo(info);
+    input2->GetOutputSlot(0).SetTensorInfo(info);
+    sub->GetOutputSlot(0).SetTensorInfo(info);
+    add->GetOutputSlot(0).SetTensorInfo(info);
+
+    // optimize the network
+    std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
+    Graph& graph = optNetObjPtr->GetGraph();
+
+    armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
+    armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
+    armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "input2");
+    armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "sub");
+    armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "[ sub (0) -> add (1) ]");
+    armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "add");
+    armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
+
+    // Checks order is valid.
+    BOOST_TEST(CheckOrder(graph, layer0, layer1));
+    BOOST_TEST(CheckOrder(graph, layer1, layer2));
+    BOOST_TEST(CheckOrder(graph, layer2, layer3));
+    BOOST_TEST(CheckOrder(graph, layer3, layer4));
+    BOOST_TEST(CheckOrder(graph, layer4, layer5));
+    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+
+    // Load it into the runtime. It should pass.
+    NetworkId netId;
+    std::string ignoredErrorMessage;
+    INetworkProperties networkProperties(false, false);
+
+    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
+
+    // Creates structures for input & output
+    std::vector<float> inputData0
+    {
+        1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f, 0.0f
+    };
+    std::vector<float> inputData1
+    {
+        0.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f
+    };
+    std::vector<float> inputData2
+    {
+        12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+    };
+
+    std::vector<float> outputData(12);
+
+    std::vector<float> expectedOutput
+    {
+        13.0f, 11.0f, 11.0f, 9.0f, 7.0f, 7.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, -5.0f
+    };
+
+    InputTensors inputTensors
+    {
+        { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
+        { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
+        { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+    };
+    OutputTensors outputTensors
+    {
+        { 0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
+    };
+
+    runtime->GetProfiler(netId)->EnableProfiling(true);
+
+    // Do the inference
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Retrieve the Profiler.Print() output to get the workload execution
+    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+    std::stringstream ss;
+    profilerManager.GetProfiler()->Print(ss);
+    std::string dump = ss.str();
+
+    // Contains CopyMemGeneric between the backends
+    std::size_t found = dump.find("CopyMemGeneric");
+    BOOST_TEST(found != std::string::npos);
+
+    // Does not contain ImportMemGeneric
+    found = dump.find("ImportMemGeneric");
+    BOOST_TEST(found == std::string::npos);
+
+    // Does not use memory import between backends; a MemCopy layer is inserted instead
+    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+
+    // Check output is as expected
+    BOOST_TEST(outputData == expectedOutput);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index c881632..97c7dd3 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -661,7 +661,7 @@
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Cannot import as import is disabled
-    BOOST_CHECK(!handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    BOOST_CHECK_THROW(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
 }
 
 BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryImport)