IVGCVSW-1949 : Refactor ITensorHandle and move backend-specific copy logic into the backend tensor handles

Change-Id: I48242425c6a6856e13ebcee1b140cbd2af94a3aa
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.cpp b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
index acc28c9..ba7208c 100644
--- a/src/backends/backendsCommon/test/TensorCopyUtils.cpp
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
@@ -4,158 +4,20 @@
 //
 
 #include "TensorCopyUtils.hpp"
-
 #include <Half.hpp>
 
-#ifdef ARMCOMPUTECL_ENABLED
-#include <cl/ClTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTENEON_ENABLED
-#include <neon/NeonTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTECLENABLED || ARMCOMPUTENEON_ENABLED
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#endif
-
-#include <backendsCommon/CpuTensorHandle.hpp>
-
-#include <boost/cast.hpp>
-
-#include <algorithm>
-#include <cstring>
-
-void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
-            handle->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(static_cast<const armnn::Half*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            handle->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyInFrom(memory);
 }
 
-void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
+void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
-            const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<armnn::Half*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            const_cast<armnn::IClTensorHandle*>(handle)->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyOutTo(memory);
 }
 
-void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
     tensorHandle->Allocate();
-    CopyDataToITensorHandle(tensorHandle, mem);
+    CopyDataToITensorHandle(tensorHandle, memory);
 }