IVGCVSW-1949 : Refactor ITensorHandle and move backend specifics to their respective backends

Change-Id: I48242425c6a6856e13ebcee1b140cbd2af94a3aa
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/CpuTensorHandle.cpp
index 46a7cb8..fe0c634 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.cpp
@@ -84,6 +84,16 @@
     }
 }
 
+void ScopedCpuTensorHandle::CopyOutTo(void* memory) const
+{
+    memcpy(memory, GetTensor<void>(), GetTensorInfo().GetNumBytes());
+}
+
+void ScopedCpuTensorHandle::CopyInFrom(const void* memory)
+{
+    memcpy(GetTensor<void>(), memory, GetTensorInfo().GetNumBytes());
+}
+
 void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other)
 {
     CopyFrom(other.GetTensor<void>(), other.GetTensorInfo().GetNumBytes());
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index 4138812..ae13d6c 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -31,11 +31,6 @@
         return m_TensorInfo;
     }
 
-    virtual ITensorHandle::Type GetType() const override
-    {
-        return ITensorHandle::Cpu;
-    }
-
     virtual void Manage() override {}
 
     virtual ITensorHandle* GetParent() const override { return nullptr; }
@@ -66,6 +61,10 @@
     void SetConstMemory(const void* mem) { m_Memory = mem; }
 
 private:
+    // Only used for testing; deliberately no-ops for const handles (NOTE(review): silent no-op CopyOutTo — confirm tests expect this)
+    void CopyOutTo(void*) const override {}
+    void CopyInFrom(const void*) override {}
+
     ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
     ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
 
@@ -119,6 +118,10 @@
     virtual void Allocate() override;
 
 private:
+    // Only used for testing
+    void CopyOutTo(void* memory) const override;
+    void CopyInFrom(const void* memory) override;
+
     void CopyFrom(const ScopedCpuTensorHandle& other);
     void CopyFrom(const void* srcMemory, unsigned int numBytes);
 };
diff --git a/src/backends/backendsCommon/ITensorHandle.hpp b/src/backends/backendsCommon/ITensorHandle.hpp
index 02f4ed6..176b021 100644
--- a/src/backends/backendsCommon/ITensorHandle.hpp
+++ b/src/backends/backendsCommon/ITensorHandle.hpp
@@ -12,13 +12,6 @@
 class ITensorHandle
 {
 public:
-    enum Type
-    {
-        Cpu,
-        CL,
-        Neon
-    };
-
     virtual ~ITensorHandle(){}
 
     /// Indicate to the memory manager that this resource is active.
@@ -29,10 +22,6 @@
     /// This is used to compute overlapping lifetimes of resources.
     virtual void Allocate() = 0;
 
-    /// Get the type backend associated with the tensor handle.
-    /// \return Type enum
-    virtual ITensorHandle::Type GetType() const = 0;
-
     /// Get the parent tensor if this is a subtensor.
     /// \return a pointer to the parent tensor. Otherwise nullptr if not a subtensor.
     virtual ITensorHandle* GetParent() const = 0;
@@ -64,10 +53,14 @@
     /// \return a TensorShape filled with the strides for each dimension
     virtual TensorShape GetStrides() const = 0;
 
-    /// Get the number of elements for each dimension orderd from slowest iterating dimension
+    /// Get the number of elements for each dimension ordered from slowest iterating dimension
     /// to fastest iterating dimension.
     /// \return a TensorShape filled with the number of elements for each dimension.
     virtual TensorShape GetShape() const = 0;
+
+    // Testing support: read out and write in the tensor data content.
+    virtual void CopyOutTo(void* memory) const = 0;
+    virtual void CopyInFrom(const void* memory) = 0;
 };
 
 }
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.cpp b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
index acc28c9..ba7208c 100644
--- a/src/backends/backendsCommon/test/TensorCopyUtils.cpp
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
@@ -4,158 +4,20 @@
 //
 
 #include "TensorCopyUtils.hpp"
-
 #include <Half.hpp>
 
-#ifdef ARMCOMPUTECL_ENABLED
-#include <cl/ClTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTENEON_ENABLED
-#include <neon/NeonTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTECLENABLED || ARMCOMPUTENEON_ENABLED
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#endif
-
-#include <backendsCommon/CpuTensorHandle.hpp>
-
-#include <boost/cast.hpp>
-
-#include <algorithm>
-#include <cstring>
-
-void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
-            handle->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(static_cast<const armnn::Half*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            handle->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyInFrom(memory);
 }
 
-void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
+void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
-            const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<armnn::Half*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            const_cast<armnn::IClTensorHandle*>(handle)->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyOutTo(memory);
 }
 
-void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
     tensorHandle->Allocate();
-    CopyDataToITensorHandle(tensorHandle, mem);
+    CopyDataToITensorHandle(tensorHandle, memory);
 }
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.hpp b/src/backends/backendsCommon/test/TensorCopyUtils.hpp
index 2187523..36f6369 100644
--- a/src/backends/backendsCommon/test/TensorCopyUtils.hpp
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.hpp
@@ -8,8 +8,8 @@
 
 #include <backendsCommon/ITensorHandle.hpp>
 
-void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory);
 
 void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle);
 
-void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
\ No newline at end of file
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory);
\ No newline at end of file