IVGCVSW-6957 'Import Host Memory in SL'

* Enabled importing host memory in SL by default
* Updated the host memory import functionality in GpuAcc

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I22132b1e1008159b0e7247219762e3e9ae5eba10
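
Note: the substance of this change is that the SL driver now always attempts
to import caller-owned (Malloc-backed) host memory before execution, instead
of doing so only when every request argument is pointer-backed. A minimal
sketch of the resulting flow, assuming a loaded network and populated tensor
vectors as the driver already holds them (see the ArmnnPreparedModel.cpp
hunks below):

    std::vector<armnn::ImportedInputId> importedInputIds =
        runtime->ImportInputs(netId, inputTensors, armnn::MemorySource::Malloc);
    std::vector<armnn::ImportedOutputId> importedOutputIds =
        runtime->ImportOutputs(netId, outputTensors, armnn::MemorySource::Malloc);
    // Tensors that could not be imported are simply absent from the id
    // vectors; EnqueueWorkload copies those through pools as before.
    armnn::Status status = runtime->EnqueueWorkload(netId,
                                                    inputTensors,
                                                    outputTensors,
                                                    importedInputIds,
                                                    importedOutputIds);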
diff --git a/shim/sl/canonical/ArmnnDriver.hpp b/shim/sl/canonical/ArmnnDriver.hpp
index 877faa6..c33c61a 100644
--- a/shim/sl/canonical/ArmnnDriver.hpp
+++ b/shim/sl/canonical/ArmnnDriver.hpp
@@ -39,12 +39,6 @@
     ~ArmnnDriver()
     {
         VLOG(DRIVER) << "ArmnnDriver::~ArmnnDriver()";
-        // Unload the networks
-        for (auto& netId : ArmnnDriverImpl::GetLoadedNetworks())
-        {
-            m_Runtime->UnloadNetwork(netId);
-        }
-        ArmnnDriverImpl::ClearNetworks();
     }
 
 public:
diff --git a/shim/sl/canonical/ArmnnDriverImpl.cpp b/shim/sl/canonical/ArmnnDriverImpl.cpp
index 3223d9e..8706c38 100644
--- a/shim/sl/canonical/ArmnnDriverImpl.cpp
+++ b/shim/sl/canonical/ArmnnDriverImpl.cpp
@@ -114,11 +114,6 @@
     return ValidateSharedHandle(dataCacheHandle[0]);
 }
 
-std::vector<armnn::NetworkId>& ArmnnDriverImpl::GetLoadedNetworks()
-{
-    return m_NetworkIDs;
-}
-
 GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModel(
     const armnn::IRuntimePtr& runtime,
     const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
@@ -317,7 +312,6 @@
                                             options.GetBackends().end(),
                                             armnn::Compute::GpuAcc) != options.GetBackends().end());
 
-    m_NetworkIDs.push_back(netId);
     auto preparedModel = std::make_shared<const ArmnnPreparedModel>(netId,
                                                                     runtime.get(),
                                                                     model,
@@ -356,8 +350,6 @@
     return std::move(preparedModel);
 }
 
-std::vector<armnn::NetworkId> ArmnnDriverImpl::m_NetworkIDs = {};
-
 GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModelFromCache(
     const armnn::IRuntimePtr& runtime,
     const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
@@ -537,7 +529,6 @@
         return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
     }
 
-    m_NetworkIDs.push_back(netId);
     return std::make_shared<const ArmnnPreparedModel>(netId,
                                                       runtime.get(),
                                                       options.GetRequestInputsAndOutputsDumpDir(),
@@ -553,9 +544,4 @@
     return theCapabilities;
 }
 
-void ArmnnDriverImpl::ClearNetworks()
-{
-    m_NetworkIDs.clear();
-}
-
 } // namespace armnn_driver
diff --git a/shim/sl/canonical/ArmnnDriverImpl.hpp b/shim/sl/canonical/ArmnnDriverImpl.hpp
index 836bf46..6af0ab2 100644
--- a/shim/sl/canonical/ArmnnDriverImpl.hpp
+++ b/shim/sl/canonical/ArmnnDriverImpl.hpp
@@ -45,15 +45,9 @@
 
     static const Capabilities& GetCapabilities(const armnn::IRuntimePtr& runtime);
 
-    static std::vector<armnn::NetworkId>& GetLoadedNetworks();
-
-    static void ClearNetworks();
-
 private:
     static bool ValidateSharedHandle(const SharedHandle& sharedHandle);
     static bool ValidateDataCacheHandle(const std::vector<SharedHandle>& dataCacheHandle, const size_t dataSize);
-
-    static std::vector<armnn::NetworkId> m_NetworkIDs;
 };
 
 } // namespace armnn_driver
\ No newline at end of file
diff --git a/shim/sl/canonical/ArmnnPreparedModel.cpp b/shim/sl/canonical/ArmnnPreparedModel.cpp
index c0ce3e4..54a0190 100644
--- a/shim/sl/canonical/ArmnnPreparedModel.cpp
+++ b/shim/sl/canonical/ArmnnPreparedModel.cpp
@@ -93,21 +93,21 @@
 {
     for (auto& input : request.inputs)
     {
-        if (input.lifetime == Request::Argument::LifeTime::POINTER)
+        if (input.lifetime != Request::Argument::LifeTime::POINTER)
         {
-            return true;
+            return false;
         }
     }
 
     for (auto& output: request.outputs)
     {
-        if (output.lifetime == Request::Argument::LifeTime::POINTER)
+        if (output.lifetime != Request::Argument::LifeTime::POINTER)
         {
-           return true;
+           return false;
         }
     }
 
-    return false;
+    return true;
 }
 
 } // anonymous namespace
@@ -318,7 +318,8 @@
         }
         VLOG(DRIVER) << "ArmnnPreparedModel::execute(): " << GetModelSummary(m_Model).c_str();
     }
-    if (hasDeadlinePassed(deadline)) {
+    if (hasDeadlinePassed(deadline))
+    {
         return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
     }
 
@@ -381,7 +382,8 @@
     VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph(...)";
 
     DumpTensorsIfRequired("Input", inputTensors);
-
+    std::vector<armnn::ImportedInputId> importedInputIds;
+    std::vector<armnn::ImportedOutputId> importedOutputIds;
     try
     {
         if (ctx.measureTimings == MeasureTiming::YES)
@@ -390,24 +392,13 @@
         }
         armnn::Status status;
         VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false";
-
-        if (pointerMemory)
-        {
-            std::vector<armnn::ImportedInputId> importedInputIds;
-            importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
-
-            std::vector<armnn::ImportedOutputId> importedOutputIds;
-            importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
-            status = m_Runtime->EnqueueWorkload(m_NetworkId,
-                                                inputTensors,
-                                                outputTensors,
-                                                importedInputIds,
-                                                importedOutputIds);
-        }
-        else
-        {
-            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
-        }
+        importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
+        importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
+        status = m_Runtime->EnqueueWorkload(m_NetworkId,
+                                            inputTensors,
+                                            outputTensors,
+                                            importedInputIds,
+                                            importedOutputIds);
 
         if (ctx.measureTimings == MeasureTiming::YES)
         {
@@ -430,7 +421,7 @@
         return ErrorStatus::GENERAL_FAILURE;
     }
 
-    if (!pointerMemory)
+    if (!pointerMemory && (!importedInputIds.empty() || !importedOutputIds.empty()))
     {
         CommitPools(*pMemPools);
     }
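
The inverted lifetime checks in the first ArmnnPreparedModel.cpp hunk above
change the question the helper answers from "is any argument POINTER-backed?"
to "are all arguments POINTER-backed?", so the pool-free path is only taken
when the whole request lives in host pointers. A hedged, equivalent
formulation (the helper name is assumed; Request is the canonical NNAPI type
used in the diff):

    #include <algorithm>

    // True only when every input and output argument is backed by a raw pointer.
    bool allArgumentsPointerBacked(const Request& request)
    {
        auto isPointer = [](const Request::Argument& arg)
        {
            return arg.lifetime == Request::Argument::LifeTime::POINTER;
        };
        return std::all_of(request.inputs.begin(),  request.inputs.end(),  isPointer)
            && std::all_of(request.outputs.begin(), request.outputs.end(), isPointer);
    }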
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index a27add9..8e664e6 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -1466,13 +1466,21 @@
                     std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
                                                                    inputTensor.second.GetMemoryArea());
 
-            if (outputTensorHandle->CanBeImported(passThroughTensorHandle->Map(), forceImportMemorySource)
-                && (outputTensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource)))
+            try
             {
-                importedInputs.push_back(inputIndex);
+                if (outputTensorHandle->CanBeImported(passThroughTensorHandle->Map(), forceImportMemorySource)
+                    && (outputTensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource)))
+                {
+                    importedInputs.push_back(inputIndex);
+                }
+                passThroughTensorHandle->Unmap();
             }
-            passThroughTensorHandle->Unmap();
-
+            catch(const MemoryImportException& exception)
+            {
+                ARMNN_LOG(error) << "An error occurred attempting to import input_"
+                                 << inputIndex << " : " << exception.what();
+                passThroughTensorHandle->Unmap();
+            }
             inputIndex++;
         }
 
@@ -1576,7 +1584,6 @@
         for (const BindableLayer* const outputLayer : graph.GetOutputLayers())
         {
             auto inputTensorHandle = m_PreImportedOutputHandles[outputIndex].m_TensorHandle.get();
-
             if (!inputTensorHandle)
             {
                 outputIndex++;
@@ -1596,11 +1603,19 @@
             }
 
             const auto outputTensor = *it;
-            // Check if the output memory can be imported
-            if (inputTensorHandle->CanBeImported(outputTensor.second.GetMemoryArea(), forceImportMemorySource)
-                && inputTensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
+            try
             {
-                importedOutputs.push_back(outputIndex);
+                // Check if the output memory can be imported
+                if (inputTensorHandle->CanBeImported(outputTensor.second.GetMemoryArea(), forceImportMemorySource)
+                    && inputTensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
+                {
+                    importedOutputs.push_back(outputIndex);
+                }
+            }
+            catch(const MemoryImportException& exception)
+            {
+                ARMNN_LOG(error) << "An error occurred attempting to import output_"
+                                 << outputIndex << " : " << exception.what();
             }
             outputIndex++;
         }
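
The LoadedNetwork.cpp changes above make importing best-effort: a
MemoryImportException thrown by CanBeImported()/Import() no longer propagates
out of ImportInputs()/ImportOutputs(); the tensor is simply left out of the
imported set and copied at EnqueueWorkload() time instead. The pattern,
reduced to its core (names follow the diff; the surrounding loop and handle
setup are elided):

    try
    {
        // Zero-copy path: record the index only if the backend accepts the memory.
        if (handle->CanBeImported(memory, source) && handle->Import(memory, source))
        {
            importedIds.push_back(index);
        }
    }
    catch (const armnn::MemoryImportException& exception)
    {
        // Failed imports are logged and fall back to the copy path.
        ARMNN_LOG(error) << "Import failed for index " << index << " : " << exception.what();
    }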
diff --git a/src/backends/cl/ClImportTensorHandle.hpp b/src/backends/cl/ClImportTensorHandle.hpp
index 54710d8..aba12d0 100644
--- a/src/backends/cl/ClImportTensorHandle.hpp
+++ b/src/backends/cl/ClImportTensorHandle.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -101,7 +101,6 @@
                     CL_IMPORT_TYPE_HOST_ARM,
                     0
                 };
-
                 return ClImport(importProperties, memory);
             }
             if (source == MemorySource::DmaBuf)
@@ -185,59 +184,14 @@
         }
     }
 
-    virtual bool CanBeImported(void* memory, MemorySource source) override
+    virtual bool CanBeImported(void* /*memory*/, MemorySource source) override
     {
         if (m_ImportFlags & static_cast<MemorySourceFlags>(source))
         {
             if (source == MemorySource::Malloc)
             {
-                const cl_import_properties_arm importProperties[] =
-                        {
-                                CL_IMPORT_TYPE_ARM,
-                                CL_IMPORT_TYPE_HOST_ARM,
-                                0
-                        };
-
-                size_t totalBytes = m_Tensor.info()->total_size();
-
-                // Round the size of the mapping to match the CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE
-                // This does not change the size of the buffer, only the size of the mapping the buffer is mapped to
-                // We do this to match the behaviour of the Import function later on.
-                auto cachelineAlignment =
-                        arm_compute::CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
-                auto roundedSize = totalBytes;
-                if (totalBytes % cachelineAlignment != 0)
-                {
-                    roundedSize = cachelineAlignment + totalBytes - (totalBytes % cachelineAlignment);
-                }
-
-                cl_int error = CL_SUCCESS;
-                cl_mem buffer;
-                buffer = clImportMemoryARM(arm_compute::CLKernelLibrary::get().context().get(),
-                                           CL_MEM_READ_WRITE, importProperties, memory, roundedSize, &error);
-
-                // If we fail to map we know the import will not succeed and can return false.
-                // There is no memory to be released if error is not CL_SUCCESS
-                if (error != CL_SUCCESS)
-                {
-                    return false;
-                }
-                else
-                {
-                    // If import was successful we can release the mapping knowing import will succeed at workload
-                    // execution and return true
-                    error = clReleaseMemObject(buffer);
-                    if (error == CL_SUCCESS)
-                    {
-                        return true;
-                    }
-                    else
-                    {
-                        // If we couldn't release the mapping this constitutes a memory leak and throw an exception
-                        throw MemoryImportException("ClImportTensorHandle::Failed to unmap cl_mem buffer: "
-                                                    + std::to_string(error));
-                    }
-                }
+                // Return true here: the ClImport() call made at Import() time decides whether the memory can actually be imported
+                return true;
             }
         }
         else