IVGCVSW-1949 : Refactor ITensorHandle and move backend-specific code into the backends
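
The ITensorHandle::Type enum and GetType() are removed; each backend's tensor
handle now implements CopyOutTo() / CopyInFrom() itself, so the test helpers in
TensorCopyUtils simply delegate to the handle instead of downcasting per
backend. A rough sketch of the resulting test-side flow is below (the helper
name and the float buffers are illustrative assumptions, not part of this
patch):

    #include <backendsCommon/ITensorHandle.hpp>

    #include <vector>

    // Hypothetical example only: round-trip data through any ITensorHandle
    // using the new virtuals. 'output' is assumed to be pre-sized to the
    // tensor's element count.
    void ExampleRoundTrip(armnn::ITensorHandle& handle,
                          const std::vector<float>& input,
                          std::vector<float>& output)
    {
        handle.Allocate();                // allocate backing memory for the handle
        handle.CopyInFrom(input.data());  // backend decides how the data goes in
        handle.CopyOutTo(output.data());  // backend decides how the data comes out
    }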

Change-Id: I48242425c6a6856e13ebcee1b140cbd2af94a3aa
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/CpuTensorHandle.cpp
index 46a7cb8..fe0c634 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.cpp
@@ -84,6 +84,16 @@
     }
 }
 
+void ScopedCpuTensorHandle::CopyOutTo(void* memory) const
+{
+    memcpy(memory, GetTensor<void>(), GetTensorInfo().GetNumBytes());
+}
+
+void ScopedCpuTensorHandle::CopyInFrom(const void* memory)
+{
+    memcpy(GetTensor<void>(), memory, GetTensorInfo().GetNumBytes());
+}
+
 void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other)
 {
     CopyFrom(other.GetTensor<void>(), other.GetTensorInfo().GetNumBytes());
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index 4138812..ae13d6c 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -31,11 +31,6 @@
         return m_TensorInfo;
     }
 
-    virtual ITensorHandle::Type GetType() const override
-    {
-        return ITensorHandle::Cpu;
-    }
-
     virtual void Manage() override {}
 
     virtual ITensorHandle* GetParent() const override { return nullptr; }
@@ -66,6 +61,10 @@
     void SetConstMemory(const void* mem) { m_Memory = mem; }
 
 private:
+    // Only used for testing; no-ops for ConstCpuTensorHandle
+    void CopyOutTo(void*) const override {}
+    void CopyInFrom(const void*) override {}
+
     ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
     ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
 
@@ -119,6 +118,10 @@
     virtual void Allocate() override;
 
 private:
+    // Only used for testing
+    void CopyOutTo(void* memory) const override;
+    void CopyInFrom(const void* memory) override;
+
     void CopyFrom(const ScopedCpuTensorHandle& other);
     void CopyFrom(const void* srcMemory, unsigned int numBytes);
 };
diff --git a/src/backends/backendsCommon/ITensorHandle.hpp b/src/backends/backendsCommon/ITensorHandle.hpp
index 02f4ed6..176b021 100644
--- a/src/backends/backendsCommon/ITensorHandle.hpp
+++ b/src/backends/backendsCommon/ITensorHandle.hpp
@@ -12,13 +12,6 @@
 class ITensorHandle
 {
 public:
-    enum Type
-    {
-        Cpu,
-        CL,
-        Neon
-    };
-
     virtual ~ITensorHandle(){}
 
     /// Indicate to the memory manager that this resource is active.
@@ -29,10 +22,6 @@
     /// This is used to compute overlapping lifetimes of resources.
     virtual void Allocate() = 0;
 
-    /// Get the type backend associated with the tensor handle.
-    /// \return Type enum
-    virtual ITensorHandle::Type GetType() const = 0;
-
     /// Get the parent tensor if this is a subtensor.
     /// \return a pointer to the parent tensor. Otherwise nullptr if not a subtensor.
     virtual ITensorHandle* GetParent() const = 0;
@@ -64,10 +53,14 @@
     /// \return a TensorShape filled with the strides for each dimension
     virtual TensorShape GetStrides() const = 0;
 
-    /// Get the number of elements for each dimension orderd from slowest iterating dimension
+    /// Get the number of elements for each dimension ordered from slowest iterating dimension
     /// to fastest iterating dimension.
     /// \return a TensorShape filled with the number of elements for each dimension.
     virtual TensorShape GetShape() const = 0;
+
+    /// Testing support to verify and set the tensor data content.
+    virtual void CopyOutTo(void* memory) const = 0;
+    virtual void CopyInFrom(const void* memory) = 0;
 };
 
 }
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.cpp b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
index acc28c9..ba7208c 100644
--- a/src/backends/backendsCommon/test/TensorCopyUtils.cpp
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
@@ -4,158 +4,20 @@
 //
 
 #include "TensorCopyUtils.hpp"
-
 #include <Half.hpp>
 
-#ifdef ARMCOMPUTECL_ENABLED
-#include <cl/ClTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTENEON_ENABLED
-#include <neon/NeonTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTECLENABLED || ARMCOMPUTENEON_ENABLED
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#endif
-
-#include <backendsCommon/CpuTensorHandle.hpp>
-
-#include <boost/cast.hpp>
-
-#include <algorithm>
-#include <cstring>
-
-void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
-            handle->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(static_cast<const armnn::Half*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            handle->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyInFrom(memory);
 }
 
-void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
+void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
-            const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<armnn::Half*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            const_cast<armnn::IClTensorHandle*>(handle)->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyOutTo(memory);
 }
 
-void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
     tensorHandle->Allocate();
-    CopyDataToITensorHandle(tensorHandle, mem);
+    CopyDataToITensorHandle(tensorHandle, memory);
 }
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.hpp b/src/backends/backendsCommon/test/TensorCopyUtils.hpp
index 2187523..36f6369 100644
--- a/src/backends/backendsCommon/test/TensorCopyUtils.hpp
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.hpp
@@ -8,8 +8,8 @@
 
 #include <backendsCommon/ITensorHandle.hpp>
 
-void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory);
 
-void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle);
+void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle);
 
-void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
\ No newline at end of file
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory);
diff --git a/src/backends/cl/ClTensorHandle.hpp b/src/backends/cl/ClTensorHandle.hpp
index 0f1f583..f791ee8 100644
--- a/src/backends/cl/ClTensorHandle.hpp
+++ b/src/backends/cl/ClTensorHandle.hpp
@@ -7,6 +7,8 @@
 #include <backendsCommon/OutputHandler.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
+#include <Half.hpp>
+
 #include <arm_compute/runtime/CL/CLTensor.h>
 #include <arm_compute/runtime/CL/CLSubTensor.h>
 #include <arm_compute/runtime/CL/CLMemoryGroup.h>
@@ -59,8 +61,6 @@
     }
     virtual void Unmap() const override { const_cast<arm_compute::CLTensor*>(&m_Tensor)->unmap(); }
 
-    virtual ITensorHandle::Type GetType() const override { return ITensorHandle::CL; }
-
     virtual ITensorHandle* GetParent() const override { return nullptr; }
 
     virtual arm_compute::DataType GetDataType() const override
@@ -82,7 +82,60 @@
     {
         return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
     }
+
 private:
+    // Only used for testing
+    void CopyOutTo(void* memory) const override
+    {
+        const_cast<armnn::ClTensorHandle*>(this)->Map(true);
+        switch(this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<float*>(memory));
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<uint8_t*>(memory));
+                break;
+            case arm_compute::DataType::F16:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<armnn::Half*>(memory));
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+        const_cast<armnn::ClTensorHandle*>(this)->Unmap();
+    }
+
+    // Only used for testing
+    void CopyInFrom(const void* memory) override
+    {
+        this->Map(true);
+        switch(this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
+                                                                 this->GetTensor());
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
+                                                                 this->GetTensor());
+                break;
+            case arm_compute::DataType::F16:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
+                                                                 this->GetTensor());
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+        this->Unmap();
+    }
+
     arm_compute::CLTensor m_Tensor;
     std::shared_ptr<arm_compute::CLMemoryGroup> m_MemoryGroup;
 };
@@ -111,8 +164,6 @@
     }
     virtual void Unmap() const override { const_cast<arm_compute::CLSubTensor*>(&m_Tensor)->unmap(); }
 
-    virtual ITensorHandle::Type GetType() const override { return ITensorHandle::CL; }
-
     virtual ITensorHandle* GetParent() const override { return parentHandle; }
 
     virtual arm_compute::DataType GetDataType() const override
@@ -133,9 +184,60 @@
     }
 
 private:
+    // Only used for testing
+    void CopyOutTo(void* memory) const override
+    {
+        const_cast<ClSubTensorHandle*>(this)->Map(true);
+        switch(this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<float*>(memory));
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<uint8_t*>(memory));
+                break;
+            case arm_compute::DataType::F16:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<armnn::Half*>(memory));
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+        const_cast<ClSubTensorHandle*>(this)->Unmap();
+    }
+
+    // Only used for testing
+    void CopyInFrom(const void* memory) override
+    {
+        this->Map(true);
+        switch(this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
+                                                                 this->GetTensor());
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
+                                                                 this->GetTensor());
+                break;
+            case arm_compute::DataType::F16:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::Half*>(memory),
+                                                                 this->GetTensor());
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+        this->Unmap();
+    }
+
     mutable arm_compute::CLSubTensor m_Tensor;
     ITensorHandle* parentHandle = nullptr;
-
 };
 
-}
+} // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index e205cf1..43c147f 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -110,8 +110,6 @@
                                                                         TensorShape const&   subTensorShape,
                                                                         unsigned int const* subTensorOrigin) const
 {
-    BOOST_ASSERT(parent.GetType() == ITensorHandle::CL);
-
     arm_compute::Coordinates coords;
     arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
 
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index 63e2a78..7206b6f 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -55,8 +55,6 @@
         m_MemoryGroup->manage(&m_Tensor);
     }
 
-    virtual ITensorHandle::Type GetType() const override { return ITensorHandle::Neon; }
-
     virtual ITensorHandle* GetParent() const override { return nullptr; }
 
     virtual arm_compute::DataType GetDataType() const override
@@ -87,6 +85,46 @@
     }
 
 private:
+    // Only used for testing
+    void CopyOutTo(void* memory) const override
+    {
+        switch (this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<float*>(memory));
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<uint8_t*>(memory));
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+    }
+
+    // Only used for testing
+    void CopyInFrom(const void* memory) override
+    {
+        switch (this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
+                                                                 this->GetTensor());
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
+                                                                 this->GetTensor());
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+    }
+
     arm_compute::Tensor m_Tensor;
     std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
 };
@@ -108,8 +146,6 @@
     virtual void Allocate() override {}
     virtual void Manage() override {}
 
-    virtual ITensorHandle::Type GetType() const override { return ITensorHandle::Neon; }
-
     virtual ITensorHandle* GetParent() const override { return parentHandle; }
 
     virtual arm_compute::DataType GetDataType() const override
@@ -134,9 +170,50 @@
     {
         return armcomputetensorutils::GetShape(m_Tensor.info()->tensor_shape());
     }
+
 private:
+    // Only used for testing
+    void CopyOutTo(void* memory) const override
+    {
+        switch (this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<float*>(memory));
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<uint8_t*>(memory));
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+    }
+
+    // Only used for testing
+    void CopyInFrom(const void* memory) override
+    {
+        switch (this->GetDataType())
+        {
+            case arm_compute::DataType::F32:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const float*>(memory),
+                                                                 this->GetTensor());
+                break;
+            case arm_compute::DataType::QASYMM8:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
+                                                                 this->GetTensor());
+                break;
+            default:
+            {
+                throw armnn::UnimplementedException();
+            }
+        }
+    }
+
     arm_compute::SubTensor m_Tensor;
     ITensorHandle* parentHandle = nullptr;
 };
 
-}
+} // namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index fc38906..6046867 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -54,8 +54,6 @@
     TensorShape const& subTensorShape,
     unsigned int const* subTensorOrigin) const
 {
-    BOOST_ASSERT(parent.GetType() == ITensorHandle::Neon);
-
     const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
 
     arm_compute::Coordinates coords;