Reduce code duplication in HAL 1.2

Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Change-Id: Ic2e8964745a4323efb1e06d466c0699f17a70c55
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 2cd560d..d095e41 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -84,7 +84,8 @@
 namespace armnn_driver
 {
 template<typename HalVersion>
-RequestThread<ArmnnPreparedModel, HalVersion, ArmnnCallback_1_0> ArmnnPreparedModel<HalVersion>::m_RequestThread;
+RequestThread<ArmnnPreparedModel, HalVersion, CallbackContext_1_0>
+    ArmnnPreparedModel<HalVersion>::m_RequestThread;
 
 template<typename HalVersion>
 template <typename TensorBindingCollection>
@@ -226,7 +227,7 @@
         NotifyCallbackAndCheck(callback, errorStatus, callingFunction);
     };
 
-    ArmnnCallback_1_0 armnnCb;
+    CallbackContext_1_0 armnnCb;
     armnnCb.callback = cb;
     // post the request for asynchronous execution
     m_RequestThread.PostMsg(this, pMemPools, pInputTensors, pOutputTensors, armnnCb);
@@ -237,18 +238,18 @@
 template<typename HalVersion>
 void ArmnnPreparedModel<HalVersion>::ExecuteGraph(
         std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
-        std::shared_ptr<armnn::InputTensors>& pInputTensors,
-        std::shared_ptr<armnn::OutputTensors>& pOutputTensors,
-        ArmnnCallback_1_0 cb)
+        armnn::InputTensors& inputTensors,
+        armnn::OutputTensors& outputTensors,
+        CallbackContext_1_0 cb)
 {
     ALOGV("ArmnnPreparedModel::ExecuteGraph(...)");
 
-    DumpTensorsIfRequired("Input", *pInputTensors);
+    DumpTensorsIfRequired("Input", inputTensors);
 
     // run it
     try
     {
-        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);
+        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
         if (status != armnn::Status::Success)
         {
             ALOGW("EnqueueWorkload failed");
@@ -269,7 +270,7 @@
         return;
     }
 
-    DumpTensorsIfRequired("Output", *pOutputTensors);
+    DumpTensorsIfRequired("Output", outputTensors);
 
     // Commit output buffers.
     // Note that we update *all* pools, even if they aren't actually used as outputs -