IVGCVSW-1951 Update NeonWorkloadUtils

Consolidate the templated InitialiseArmComputeTensorData overloads and
the float-only InitializeArmComputeTensorDataForFloatTypes helper into
a single InitializeArmComputeTensorData function that switches on the
handle's DataType. The helper is now defined inline in
NeonWorkloadUtils.hpp, so NeonWorkloadUtils.cpp is deleted and removed
from the build files, and every call site now passes its
ConstCpuTensorHandle directly instead of dispatching on the data type
itself. A sketch of the pattern follows.
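
The shape of the change, as a minimal standalone sketch. The types and
names below (Tensor, DataType, CopyTensorData, InitializeTensorData)
are simplified, hypothetical stand-ins for the armnn and Arm Compute
APIs, not the real ones:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    enum class DataType { Float32, QuantisedAsymm8, Signed32 };

    struct Tensor
    {
        DataType type;
        std::size_t elementCount;
        std::vector<uint8_t> storage; // allocated on first copy
    };

    // One templated copy routine replaces the per-type overloads that
    // the deleted NeonWorkloadUtils.cpp instantiated explicitly.
    template <typename T>
    void CopyTensorData(Tensor& dst, const T* src)
    {
        dst.storage.resize(dst.elementCount * sizeof(T)); // allocate
        std::memcpy(dst.storage.data(), src, dst.storage.size());
    }

    // Single entry point: the switch on the runtime DataType lives
    // here instead of being repeated at every call site.
    void InitializeTensorData(Tensor& dst, const void* src)
    {
        switch (dst.type)
        {
            case DataType::Float32:
                CopyTensorData(dst, static_cast<const float*>(src));
                break;
            case DataType::QuantisedAsymm8:
                CopyTensorData(dst, static_cast<const uint8_t*>(src));
                break;
            case DataType::Signed32:
                CopyTensorData(dst, static_cast<const int32_t*>(src));
                break;
            default:
                assert(false && "Unexpected tensor type.");
        }
    }

    int main()
    {
        const float weights[] = {1.0f, 2.0f, 3.0f};
        Tensor t{DataType::Float32, 3, {}};
        InitializeTensorData(t, weights); // no type switch at the call site
        assert(t.storage.size() == sizeof(weights));
    }

Folding the dispatch into one inline helper lets the QuantisedAsymm8
and Signed32 paths share the code the float types already used, and a
new DataType only needs handling in one place.
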
Change-Id: I147dbf6811f84ec4588264d636a36efc8ec56f72
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index e63baa0..6dfd951 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -37,5 +37,4 @@
         workloads/NeonSoftmaxBaseWorkload.cpp \
         workloads/NeonSoftmaxFloatWorkload.cpp \
         workloads/NeonSoftmaxUint8Workload.cpp \
-        workloads/NeonSubtractionFloatWorkload.cpp \
-        workloads/NeonWorkloadUtils.cpp
+        workloads/NeonSubtractionFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index fddbcb5..d847df7 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -57,7 +57,6 @@
     NeonSubtractionFloatWorkload.cpp
     NeonSubtractionFloatWorkload.hpp
     NeonWorkloads.hpp
-    NeonWorkloadUtils.cpp
     NeonWorkloadUtils.hpp
 )
 
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
index 2383e78..f7056a5 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
@@ -66,10 +66,10 @@
                       m_Gamma.get(),
                       m_Data.m_Parameters.m_Eps);
 
-    InitializeArmComputeTensorDataForFloatTypes(*m_Mean, m_Data.m_Mean);
-    InitializeArmComputeTensorDataForFloatTypes(*m_Variance, m_Data.m_Variance);
-    InitializeArmComputeTensorDataForFloatTypes(*m_Gamma, m_Data.m_Gamma);
-    InitializeArmComputeTensorDataForFloatTypes(*m_Beta, m_Data.m_Beta);
+    InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean);
+    InitializeArmComputeTensorData(*m_Variance, m_Data.m_Variance);
+    InitializeArmComputeTensorData(*m_Gamma, m_Data.m_Gamma);
+    InitializeArmComputeTensorData(*m_Beta, m_Data.m_Beta);
 
     // Force Compute Library to perform the necessary copying and reshaping, after which
     // delete all the input tensors that will no longer be needed
@@ -92,5 +92,3 @@
 }
 
 } //namespace armnn
-
-
diff --git a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
index 8da3e47..b11d10f 100644
--- a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
@@ -109,30 +109,8 @@
     }
     BOOST_ASSERT(m_ConvolutionLayer);
 
-    armnn::DataType dataType = m_Data.m_Weight->GetTensorInfo().GetDataType();
+    InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
 
-    switch (dataType)
-    {
-        case DataType::Float16:
-        {
-            InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<Half>());
-            break;
-        }
-        case DataType::Float32:
-        {
-            InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<float>());
-            break;
-        }
-        case DataType::QuantisedAsymm8:
-        {
-            InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<uint8_t>());
-            break;
-        }
-        default:
-        {
-            BOOST_ASSERT_MSG(false, "Unknown DataType.");
-        }
-    }
 }
 
 template<armnn::DataType... dataTypes>
@@ -147,4 +125,3 @@
 template class NeonConvolution2dBaseWorkload<armnn::DataType::QuantisedAsymm8>;
 
 } //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp
index cd26f8d..9969154 100644
--- a/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp
@@ -18,7 +18,7 @@
 {
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
-        InitializeArmComputeTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
+        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
     }
 
     m_ConvolutionLayer->prepare();
@@ -37,4 +37,3 @@
 }
 
 } //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp b/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp
index 5affe68..8572cbf 100644
--- a/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp
@@ -14,7 +14,7 @@
 {
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
-        InitialiseArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias->template GetConstTensor<int32_t>());
+        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
     }
 
     m_ConvolutionLayer->prepare();
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
index 4b266f3..9790998 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
@@ -69,11 +69,11 @@
 
     BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
 
-    InitializeArmComputeTensorDataForFloatTypes(*m_KernelTensor, m_Data.m_Weight);
+    InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
 
     if (m_BiasTensor)
     {
-        InitializeArmComputeTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
+        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
     }
 
     m_pDepthwiseConvolutionLayer->prepare();
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
index 6c6c2df..25d00f9 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
@@ -69,11 +69,11 @@
 
     BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
 
-    InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
+    InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
 
     if (m_BiasTensor)
     {
-        InitialiseArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
+        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
     }
 
     m_pDepthwiseConvolutionLayer->prepare();
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 8cebb4f..51fd7af 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -69,22 +69,8 @@
     // Allocate
-    if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
-    {
-        InitialiseArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
-    }
-    else
-    {
-        InitializeArmComputeTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);
-    }
+    InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
 
     if (m_BiasesTensor)
     {
-        if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
-        {
-            InitialiseArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
-        }
-        else
-        {
-            InitializeArmComputeTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
-        }
+        InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
     }
 
@@ -107,4 +93,3 @@
 }
 
 } //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index 5899f13..7745cec 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -169,57 +169,57 @@
 
     armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
 
-    InitialiseArmComputeTensorData(*m_InputToForgetWeightsTensor,
-                                   m_Data.m_InputToForgetWeights->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_InputToCellWeightsTensor,
-                                   m_Data.m_InputToCellWeights->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_InputToOutputWeightsTensor,
-                                   m_Data.m_InputToOutputWeights->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
-                                   m_Data.m_RecurrentToForgetWeights->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
-                                   m_Data.m_RecurrentToCellWeights->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
-                                   m_Data.m_RecurrentToOutputWeights->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_ForgetGateBiasTensor,
-                                   m_Data.m_ForgetGateBias->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_CellBiasTensor,
-                                   m_Data.m_CellBias->GetConstTensor<float>());
-    InitialiseArmComputeTensorData(*m_OutputGateBiasTensor,
-                                   m_Data.m_OutputGateBias->GetConstTensor<float>());
+    InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor,
+                                   m_Data.m_InputToForgetWeights);
+    InitializeArmComputeTensorData(*m_InputToCellWeightsTensor,
+                                   m_Data.m_InputToCellWeights);
+    InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor,
+                                   m_Data.m_InputToOutputWeights);
+    InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
+                                   m_Data.m_RecurrentToForgetWeights);
+    InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
+                                   m_Data.m_RecurrentToCellWeights);
+    InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
+                                   m_Data.m_RecurrentToOutputWeights);
+    InitializeArmComputeTensorData(*m_ForgetGateBiasTensor,
+                                   m_Data.m_ForgetGateBias);
+    InitializeArmComputeTensorData(*m_CellBiasTensor,
+                                   m_Data.m_CellBias);
+    InitializeArmComputeTensorData(*m_OutputGateBiasTensor,
+                                   m_Data.m_OutputGateBias);
 
     if (!m_Data.m_Parameters.m_CifgEnabled)
     {
-        InitialiseArmComputeTensorData(*m_InputToInputWeightsTensor,
-                                       m_Data.m_InputToInputWeights->GetConstTensor<float>());
-        InitialiseArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
-                                       m_Data.m_RecurrentToInputWeights->GetConstTensor<float>());
+        InitializeArmComputeTensorData(*m_InputToInputWeightsTensor,
+                                       m_Data.m_InputToInputWeights);
+        InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
+                                       m_Data.m_RecurrentToInputWeights);
         if (m_Data.m_CellToInputWeights != nullptr)
         {
-            InitialiseArmComputeTensorData(*m_CellToInputWeightsTensor,
-                                           m_Data.m_CellToInputWeights->GetConstTensor<float>());
+            InitializeArmComputeTensorData(*m_CellToInputWeightsTensor,
+                                           m_Data.m_CellToInputWeights);
         }
-        InitialiseArmComputeTensorData(*m_InputGateBiasTensor,
-                                       m_Data.m_InputGateBias->GetConstTensor<float>());
+        InitializeArmComputeTensorData(*m_InputGateBiasTensor,
+                                       m_Data.m_InputGateBias);
     }
 
     if (m_Data.m_Parameters.m_ProjectionEnabled)
     {
-        InitialiseArmComputeTensorData(*m_ProjectionWeightsTensor,
-                                       m_Data.m_ProjectionWeights->GetConstTensor<float>());
+        InitializeArmComputeTensorData(*m_ProjectionWeightsTensor,
+                                       m_Data.m_ProjectionWeights);
         if (m_Data.m_ProjectionBias != nullptr)
         {
-            InitialiseArmComputeTensorData(*m_ProjectionBiasTensor,
-                                           m_Data.m_ProjectionBias->GetConstTensor<float>());
+            InitializeArmComputeTensorData(*m_ProjectionBiasTensor,
+                                           m_Data.m_ProjectionBias);
         }
     }
 
     if (m_Data.m_Parameters.m_PeepholeEnabled)
     {
-        InitialiseArmComputeTensorData(*m_CellToForgetWeightsTensor,
-                                       m_Data.m_CellToForgetWeights->GetConstTensor<float>());
-        InitialiseArmComputeTensorData(*m_CellToOutputWeightsTensor,
-                                       m_Data.m_CellToOutputWeights->GetConstTensor<float>());
+        InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor,
+                                       m_Data.m_CellToForgetWeights);
+        InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor,
+                                       m_Data.m_CellToOutputWeights);
     }
 
     // Force Compute Library to perform the necessary copying and reshaping, after which
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.cpp b/src/backends/neon/workloads/NeonWorkloadUtils.cpp
deleted file mode 100644
index 195f090..0000000
--- a/src/backends/neon/workloads/NeonWorkloadUtils.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "NeonWorkloadUtils.hpp"
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-#include <backends/aclCommon/ArmComputeUtils.hpp>
-#include <backends/neon/NeonTensorHandle.hpp>
-#include <backends/neon/NeonLayerSupport.hpp>
-#include <backends/CpuTensorHandle.hpp>
-
-#include <armnn/Utils.hpp>
-#include <armnn/Exceptions.hpp>
-
-#include <cstring>
-#include <boost/assert.hpp>
-#include <boost/cast.hpp>
-#include <boost/format.hpp>
-
-#include "Profiling.hpp"
-
-#include <armnn/Types.hpp>
-#include <Half.hpp>
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-// Allocates a tensor and copy the contents in data to the tensor contents.
-template<typename T>
-void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data)
-{
-    InitialiseArmComputeTensorEmpty(tensor);
-    CopyArmComputeITensorData(data, tensor);
-}
-
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const Half* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const float* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const uint8_t* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const int32_t* data);
-
-void InitializeArmComputeTensorDataForFloatTypes(arm_compute::Tensor& tensor,
-                                                 const ConstCpuTensorHandle* handle)
-{
-    BOOST_ASSERT(handle);
-    switch(handle->GetTensorInfo().GetDataType())
-    {
-        case DataType::Float16:
-            InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<Half>());
-            break;
-        case DataType::Float32:
-            InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<float>());
-            break;
-        default:
-            BOOST_ASSERT_MSG(false, "Unexpected floating point type.");
-    }
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index c4accd6..48ec753 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -5,30 +5,56 @@
 #pragma once
 
 #include <backends/Workload.hpp>
-
+#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
 #include <backends/neon/NeonTensorHandle.hpp>
 #include <backends/neon/NeonTimer.hpp>
-
-#include <arm_compute/core/Types.h>
-#include <arm_compute/core/Helpers.h>
+#include <backends/CpuTensorHandle.hpp>
 #include <arm_compute/runtime/NEON/NEFunctions.h>
-#include <arm_compute/runtime/SubTensor.h>
 
-#include <boost/cast.hpp>
+#include <Half.hpp>
 
-namespace armnn
-{
-class Layer;
-
-template<typename T>
-void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data);
-
-void InitializeArmComputeTensorDataForFloatTypes(arm_compute::Tensor& tensor, const ConstCpuTensorHandle* handle);
-} //namespace armnn
-
-
-#define     ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
+#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
     ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                   name, \
                                                   armnn::NeonTimer(), \
                                                   armnn::WallClockTimer())
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
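+// Allocates dstTensor, then copies srcData into its contents.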
+template <typename T>
+void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
+{
+    InitialiseArmComputeTensorEmpty(dstTensor);
+    CopyArmComputeITensorData(srcData, dstTensor);
+}
+
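+// Allocates the tensor and fills it from the handle's constant data, dispatching on the handle's DataType.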
+inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
+                                           const ConstCpuTensorHandle* handle)
+{
+    BOOST_ASSERT(handle);
+
+    switch (handle->GetTensorInfo().GetDataType())
+    {
+        case DataType::Float16:
+            CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::Half>());
+            break;
+        case DataType::Float32:
+            CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
+            break;
+        case DataType::QuantisedAsymm8:
+            CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
+            break;
+        case DataType::Signed32:
+            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
+            break;
+        default:
+            BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+    }
+}
+
+} //namespace armnn