IVGCVSW-3687 Add INetworkProperties to LoadNetwork

 * Allows users to specify if Import/Export should be used
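 * EndToEnd tests now expect a MemoryImportException /
   MemoryExportException when a misaligned input/output pointer is used
   with importing enabled, instead of checking for a fallback
   CopyMemGeneric workload

A minimal usage sketch of the new LoadNetwork overload (illustrative
only, not part of this diff; the runtime/network setup and the boolean
argument order (importEnabled, exportEnabled) are inferred from the
tests below):

    #include <armnn/ArmNN.hpp>
    #include <string>
    #include <utility>

    // Load an already-built network with importing and exporting enabled.
    void LoadWithImportExport(armnn::INetworkPtr net)
    {
        armnn::IRuntime::CreationOptions options;
        armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

        armnn::IOptimizedNetworkPtr optNet =
            armnn::Optimize(*net, { armnn::Compute::CpuRef }, runtime->GetDeviceSpec());

        armnn::NetworkId netId;
        std::string errorMessage;
        // Assumed argument order: (importEnabled, exportEnabled).
        armnn::INetworkProperties networkProperties(true, true);
        runtime->LoadNetwork(netId, std::move(optNet), errorMessage, networkProperties);
    }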

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I64da26a6acbeb91ef72d31b6ccc01bb1447f624d
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 040782b..ecc8806 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -172,7 +172,7 @@
     }
 }
 
-inline void ImportNonAlignedPointerTest(std::vector<BackendId> backends)
+inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
 {
     using namespace armnn;
 
@@ -201,7 +201,10 @@
 
     // Loads it into the runtime.
     NetworkId netId;
-    runtime->LoadNetwork(netId, std::move(optNet));
+    std::string ignoredErrorMessage;
+    // Enable Importing and Exporting
+    INetworkProperties networkProperties(true, true);
+    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
 
     // Creates structures for input & output
     std::vector<float> inputData
@@ -214,12 +217,77 @@
 
     std::vector<float> outputData(5);
 
+    // Aligned output
+    float* alignedOutputData = outputData.data();
+
+    InputTensors inputTensors
+    {
+        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), misalignedInputData)},
+    };
+    OutputTensors outputTensors
+    {
+        {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), alignedOutputData)}
+    };
+
+    // The inference is expected to fail: a misaligned input pointer
+    // cannot be imported into the backend.
+    runtime->GetProfiler(netId)->EnableProfiling(true);
+
+    // Do the inference and expect it to fail with a MemoryImportException
+    BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
+}
+
+inline void ImportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
+{
+    using namespace armnn;
+
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    // build up the structure of the network
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input = net->AddInputLayer(0);
+
+    NormalizationDescriptor descriptor;
+    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
+
+    IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
+    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+
+    // Optimize the network
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    NetworkId netId;
+    std::string ignoredErrorMessage;
+    // Enable Importing and Exporting
+    INetworkProperties networkProperties(true, true);
+    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
+
+    // Creates structures for input & output
+    std::vector<float> inputData
+    {
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f
+    };
+
+    // Aligned input
+    float* alignedInputData = inputData.data();
+
+    std::vector<float> outputData(5);
+
     // Misaligned output
     float* misalignedOutputData = reinterpret_cast<float*>(reinterpret_cast<char*>(outputData.data()) + 1);
 
     InputTensors inputTensors
     {
-        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), misalignedInputData)},
+        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), alignedInputData)},
     };
     OutputTensors outputTensors
     {
@@ -230,24 +298,8 @@
     // should not be CopyMemGeneric workloads.
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
-    // Do the inference
-    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
-
-    // Retrieve the Profiler.Print() output to get the workload execution
-    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
-    std::stringstream ss;
-    profilerManager.GetProfiler()->Print(ss);;
-    std::string dump = ss.str();
-
-    // Contains RefNormalizationWorkload
-    std::size_t found = dump.find("RefNormalizationWorkload");
-    BOOST_TEST(found != std::string::npos);
-    // No Contains SyncMemGeneric (Created when importing the output tensor handle)
-    found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found == std::string::npos);
-    // Contains CopyMemGeneric
-    found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    // Do the inference and expect it to fail with a MemoryExportException
+    BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryExportException);
 }
 
 inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
@@ -279,7 +331,10 @@
 
     // Loads it into the runtime.
     NetworkId netId;
-    runtime->LoadNetwork(netId, std::move(optNet));
+    std::string ignoredErrorMessage;
+    // Enable Importing and Exporting
+    INetworkProperties networkProperties(true, true);
+    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
 
     // Creates structures for input & output
     std::vector<float> inputData