IVGCVSW-6896 Fix pre-import when using sync execute.

* Refactor backend capability checks in LoadedNetwork.
* ImportInputs should check that the number of tensors does not exceed
  the number of network inputs.
* In EnqueueWorkload the check on the count of input tensors was
  ignoring pre-imported inputs (see the usage sketch below).
* Added checks to verify ImportInputs/ImportOutputs worked as expected
  in EndToEndTestImpl.
* Improve documentation on ImportInputs/ImportOutputs in IRuntime.hpp.
* Disabled import tests in CL and Neon EndToEndTests that cannot work.
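
The pre-import flow these changes exercise is, in outline, the following.
This is a minimal sketch (the RunWithPreImport helper is hypothetical); it
assumes netId refers to a network already loaded into the runtime and that
the tensors wrap malloc-backed, suitably aligned buffers:

    #include <armnn/IRuntime.hpp>

    void RunWithPreImport(armnn::IRuntime* runtime,
                          armnn::NetworkId netId,
                          const armnn::InputTensors& inputTensors,
                          const armnn::OutputTensors& outputTensors)
    {
        // Pre-import the tensors. An empty result vector means the import was
        // rejected (e.g. misaligned memory) and those tensors must instead be
        // passed to EnqueueWorkload directly.
        std::vector<armnn::ImportedInputId> importedInputIds =
            runtime->ImportInputs(netId, inputTensors, armnn::MemorySource::Malloc);
        std::vector<armnn::ImportedOutputId> importedOutputIds =
            runtime->ImportOutputs(netId, outputTensors, armnn::MemorySource::Malloc);

        // When everything imported, only the ids are needed; the tensor
        // vectors passed to EnqueueWorkload are left empty.
        runtime->EnqueueWorkload(netId, armnn::InputTensors(), armnn::OutputTensors(),
                                 importedInputIds, importedOutputIds);
    }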

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Iae4b2644a1c9f01ee72bce1afb211661cc9ae2e3
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index cc5aa23..44ae2be 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -887,10 +887,12 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
     std::vector<ImportedInputId> importedInputIds =
         runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+    CHECK(importedInputIds.size() == 1);
     std::vector<ImportedOutputId> importedOutputIds =
         runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+    CHECK(importedOutputIds.size() == 1);
     // Do the inference and force the import as the memory is aligned.
-    runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+    runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds);
 
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -997,11 +999,14 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
     std::vector<ImportedInputId> importedInputIds =
         runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+    // We expect the import to have failed.
+    CHECK(importedInputIds.size() == 0);
     std::vector<ImportedOutputId> importedOutputIds =
         runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+    CHECK(importedOutputIds.size() == 1);
 
     // Do the inference and force the import as the memory is misaligned.
-    runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+    runtime->EnqueueWorkload(netId, inputTensors, OutputTensors(), importedInputIds, importedOutputIds);
 
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -1113,11 +1118,14 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
     std::vector<ImportedInputId> importedInputIds =
         runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+    CHECK(importedInputIds.size() == 1);
+    // We expect the output import to fail.
     std::vector<ImportedOutputId> importedOutputIds =
         runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+    CHECK(importedOutputIds.size() == 0);
 
-    // Do the inference and force the import as the memory is misaligned.
-    runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+    // Even though importing the output failed, we still expect the inference to work using outputTensors.
+    runtime->EnqueueWorkload(netId, InputTensors(), outputTensors, importedInputIds, importedOutputIds);
 
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -1233,8 +1241,12 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
     std::vector<ImportedInputId> importedInputIds =
         runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+    // Import should have failed.
+    CHECK(importedInputIds.size() == 0);
     std::vector<ImportedOutputId> importedOutputIds =
         runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+    // Import should have failed.
+    CHECK(importedOutputIds.size() == 0);
 
     // Do the inference and force the import as the memory is misaligned.
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
@@ -1339,10 +1351,12 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
     std::vector<ImportedInputId> importedInputIds =
         runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+    CHECK(importedInputIds.size() == 1);
     std::vector<ImportedOutputId> importedOutputIds =
         runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+    CHECK(importedOutputIds.size() == 1);
     // Do the inference and force the import as the memory is aligned.
-    runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+    runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds);
 
     // Retrieve the Profiler.AnalyzeEventsAndWriteResults() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -1408,7 +1422,11 @@
         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), misalignedOutputPtr)}
     };
     importedInputIds = runtime->ImportInputs(netId, inputTensorsMisaligned, MemorySource::Malloc);
+    // Import should fail.
+    CHECK(importedInputIds.size() == 0);
     importedOutputIds = runtime->ImportOutputs(netId, outputTensorsMisaligned, MemorySource::Malloc);
+    // Import should fail.
+    CHECK(importedOutputIds.size() == 0);
 
     // Do the inference and force the import as the memory is misaligned.
     runtime->EnqueueWorkload(netId,
@@ -1527,8 +1545,12 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
     std::vector<ImportedInputId>  importedInputIds =
         runtime->ImportInputs(netId, inputTensorsMisaligned, MemorySource::Malloc);
+    // Import should fail.
+    CHECK(importedInputIds.size() == 0);
     std::vector<ImportedOutputId> importedOutputIds =
         runtime->ImportOutputs(netId, outputTensorsMisaligned, MemorySource::Malloc);
+    // Import should fail.
+    CHECK(importedOutputIds.size() == 0);
 
     // Do the inference and force the import as the memory is misaligned.
     runtime->EnqueueWorkload(netId,
@@ -1593,9 +1615,11 @@
     };
 
     importedInputIds = runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+    CHECK(importedInputIds.size() == 1);
     importedOutputIds = runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+    CHECK(importedOutputIds.size() == 1);
     // Do the inference and force the import as the memory is aligned.
-    runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+    runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds);
 
     // Retrieve the Profiler.AnalyzeEventsAndWriteResults() output to get the workload execution
     // We need to use AnalyzeEventsAndWriteResults here to make sure the second inference has been profiled