IVGCVSW-6620 Update the async api to use ExecutionData

* ExecutionData holds a void* which can be assigned to whatever data a
  backend requires for execution. In the Ref backend it points at the
  existing WorkingMemDescriptors, which hold the TensorHandles for the
  inputs and outputs of a layer.
* Updated ExecuteAsync functions to take ExecutionData.
* Added CreateExecutionData and UpdateExecutionData to IBackendInternal
  (see the sketch below).
* Streamlined the experimental IWorkingMemHandle API by removing the
  map-related GetWorkingMemDescriptor function and the unused
  m_workingMemDescriptorMap from WorkingMemHandle.
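
For illustration only (not part of the change itself): a backend that keeps
using WorkingMemDescriptors, as the Ref backend does, could implement the two
new hooks roughly as below. SampleBackend is a placeholder name, and the only
ExecutionData member assumed here is the void* m_Data.

    ExecutionData SampleBackend::CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
    {
        ExecutionData executionData;
        // Point the opaque payload at the descriptor that holds this layer's
        // input and output TensorHandles.
        executionData.m_Data = &workingMemDescriptor;
        return executionData;
    }

    void SampleBackend::UpdateExecutionData(ExecutionData& executionData,
                                            WorkingMemDescriptor& workingMemDescriptor) const
    {
        // Swap in the descriptor that now refers to the pre-imported tensor handles.
        executionData.m_Data = &workingMemDescriptor;
    }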

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I54b0aab12872011743a141eb42dae200227769af
diff --git a/include/armnn/IWorkingMemHandle.hpp b/include/armnn/IWorkingMemHandle.hpp
index bbc4913..62f7111 100644
--- a/include/armnn/IWorkingMemHandle.hpp
+++ b/include/armnn/IWorkingMemHandle.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -13,6 +13,8 @@
 namespace experimental
 {
 
+struct ExecutionData;
+
 struct WorkingMemDescriptor;
 
 class IWorkingMemHandle
@@ -33,12 +35,14 @@
     /// IsAllocated returns true if the backing memory is currently allocated.
     virtual bool IsAllocated() = 0;
 
-    /// Get the WorkingMemDescriptor for a Layer.
-    virtual WorkingMemDescriptor& GetWorkingMemDescriptor(LayerGuid id) = 0;
-
     /// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
     /// the Workloads in a topologically sorted graph.
     virtual WorkingMemDescriptor& GetWorkingMemDescriptorAt(unsigned int id) = 0;
+
+    /// Get the ExecutionData at an index.
+    /// The ExecutionData is paired with a BackendId so that backend-specific functions can be called on it.
+    /// The ExecutionData entries are stored in the same order as the Workloads in a topologically sorted graph.
+    virtual std::pair<BackendId, ExecutionData>& GetExecutionDataAt(unsigned int id) = 0;
 };
 
 } // end experimental namespace
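
A rough caller-side sketch of the new accessor (not part of the patch; the
workingMemHandle and workloads variables are illustrative placeholders):

    for (unsigned int i = 0; i < workloads.size(); ++i)
    {
        // The BackendId half of the pair identifies which backend owns the
        // ExecutionData, e.g. for calling UpdateExecutionData on it.
        std::pair<BackendId, ExecutionData>& executionDataPair = workingMemHandle.GetExecutionDataAt(i);
        workloads[i]->ExecuteAsync(executionDataPair.second);
    }
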
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index e393a7e..a18adba 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -9,7 +9,9 @@
 #include <armnn/IRuntime.hpp>
 #include <armnn/Deprecated.hpp>
 
+#include <ExecutionData.hpp>
 #include <ISubgraphViewConverter.hpp>
+#include <WorkingMemDescriptor.hpp>
 
 #include <armnn/backends/IBackendContext.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
@@ -205,6 +207,27 @@
     ///
     /// \return - Returns 0 if backend does not support caching otherwise number of files cached
     virtual unsigned int GetNumberOfCacheFiles() const { return 0; }
+
+    /// Returns ExecutionData for the backend
+    ///
+    /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
+    /// \return - Returns backend-specific ExecutionData generated for a layer
+    virtual ExecutionData CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
+    {
+        IgnoreUnused(workingMemDescriptor);
+        throw armnn::Exception("CreateExecutionData: Function has not been implemented in backend.");
+    };
+
+    /// Update the ExecutionData for a layer. It is used to swap in pre-imported tensor handles.
+    ///
+    /// \param executionData - Backend-specific ExecutionData generated for a layer
+    /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
+    virtual void UpdateExecutionData(ExecutionData& executionData, WorkingMemDescriptor& workingMemDescriptor) const
+    {
+        IgnoreUnused(executionData);
+        IgnoreUnused(workingMemDescriptor);
+        throw armnn::Exception("UpdateExecutionData: Function has not been implemented in backend.");
+    };
 };
 
 using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
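
Because the payload is a plain void*, a backend is also free to hang its own
execution-time state off it rather than a WorkingMemDescriptor. A hedged
sketch, with CustomBackend and CustomExecutionState as invented names:

    // Invented example type: whatever the backend needs at execution time.
    struct CustomExecutionState
    {
        WorkingMemDescriptor* m_Descriptor;
        void*                 m_BackendSpecificState;
    };

    ExecutionData CustomBackend::CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
    {
        // Ownership and lifetime of the state object are left to the backend.
        auto* state = new CustomExecutionState{ &workingMemDescriptor, nullptr };
        ExecutionData executionData;
        executionData.m_Data = state;
        return executionData;
    }
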
diff --git a/include/armnn/backends/IWorkload.hpp b/include/armnn/backends/IWorkload.hpp
index 22baf92..78c0756 100644
--- a/include/armnn/backends/IWorkload.hpp
+++ b/include/armnn/backends/IWorkload.hpp
@@ -13,7 +13,7 @@
 namespace experimental
 {
 
-struct WorkingMemDescriptor;
+struct ExecutionData;
 
 } // end experimental namespace
 
@@ -30,7 +30,7 @@
 
     virtual void Execute() const = 0;
 
-    virtual void ExecuteAsync(WorkingMemDescriptor& desc) = 0;
+    virtual void ExecuteAsync(ExecutionData& executionData) = 0;
 
     virtual arm::pipe::ProfilingGuid GetGuid() const = 0;
 
diff --git a/include/armnn/backends/MemCopyWorkload.hpp b/include/armnn/backends/MemCopyWorkload.hpp
index da23f52..ebf6077 100644
--- a/include/armnn/backends/MemCopyWorkload.hpp
+++ b/include/armnn/backends/MemCopyWorkload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -17,7 +17,7 @@
 public:
     CopyMemGenericWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info);
     void Execute() const override;
-    void ExecuteAsync(WorkingMemDescriptor& descriptor) override;
+    void ExecuteAsync(ExecutionData& executionData) override;
 
 private:
     using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
diff --git a/include/armnn/backends/Workload.hpp b/include/armnn/backends/Workload.hpp
index 6c9fcab..be6fbd9 100644
--- a/include/armnn/backends/Workload.hpp
+++ b/include/armnn/backends/Workload.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -8,6 +8,7 @@
 #include "WorkloadData.hpp"
 #include "WorkloadInfo.hpp"
 #include "WorkingMemDescriptor.hpp"
+#include "ExecutionData.hpp"
 
 #include <armnn/Logging.hpp>
 
@@ -40,14 +41,15 @@
         m_Data.Validate(info);
     }
 
-    void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override
+    void ExecuteAsync(ExecutionData& executionData) override
     {
         ARMNN_LOG(info) << "Using default async workload execution, this will affect network performance";
 #if !defined(ARMNN_DISABLE_THREADS)
         std::lock_guard<std::mutex> lockGuard(m_AsyncWorkloadMutex);
 #endif
-        m_Data.m_Inputs = workingMemDescriptor.m_Inputs;
-        m_Data.m_Outputs = workingMemDescriptor.m_Outputs;
+        WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+        m_Data.m_Inputs = workingMemDescriptor->m_Inputs;
+        m_Data.m_Outputs = workingMemDescriptor->m_Outputs;
 
         Execute();
     };
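
A backend-specific workload overriding ExecuteAsync would typically recover its
payload the same way the default implementation above does; a minimal sketch,
using a hypothetical SampleWorkload whose DoExecute helper takes the handles:

    void SampleWorkload::ExecuteAsync(ExecutionData& executionData)
    {
        // Recover the WorkingMemDescriptor the backend placed behind the void*.
        auto* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);

        // Execute against this inference's input and output TensorHandles.
        DoExecute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
    }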