IVGCVSW-1938: Move backend-specific source files to the corresponding backend

Change-Id: I558a9a007604afc55e536d877f8da7d0215cc9c3
diff --git a/src/backends/aclCommon/test/CMakeLists.txt b/src/backends/aclCommon/test/CMakeLists.txt
new file mode 100644
index 0000000..98008ed
--- /dev/null
+++ b/src/backends/aclCommon/test/CMakeLists.txt
@@ -0,0 +1,14 @@
+#
+# Copyright © 2017 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnAclCommonUnitTests_sources
+    CreateWorkloadClNeon.hpp
+    MemCopyTests.cpp
+)
+
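+# Compile the test sources into an object library; the parent build is expected to link the
+# resulting objects into the main unit test executable.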
+add_library(armnnAclCommonUnitTests OBJECT ${armnnAclCommonUnitTests_sources})
+target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
+target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnAclCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
new file mode 100644
index 0000000..a79cfe6
--- /dev/null
+++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/test/CreateWorkload.hpp>
+
+#include <backends/MemCopyWorkload.hpp>
+#include <backends/reference/RefWorkloadFactory.hpp>
+
+#if ARMCOMPUTECL_ENABLED
+#include <backends/cl/ClTensorHandle.hpp>
+#endif
+
+#if ARMCOMPUTENEON_ENABLED
+#include <backends/neon/NeonTensorHandle.hpp>
+#endif
+
+using namespace armnn;
+
+namespace
+{
+
+using namespace std;
+
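+// Predicate that checks whether an ACL tensor handle has the expected shape. The expected
+// dimensions are listed outermost first, while ACL indexes dimensions innermost first, so the
+// comparison walks the ACL dimensions in reverse.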
+template<typename IComputeTensorHandle>
+boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle*               tensorHandle,
+                                                             std::initializer_list<unsigned int> expectedDimensions)
+{
+    arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info();
+
+    auto infoNumDims = info->num_dimensions();
+    auto numExpectedDims = expectedDimensions.size();
+    if (infoNumDims != numExpectedDims)
+    {
+        boost::test_tools::predicate_result res(false);
+        res.message() << "Different number of dimensions [" << infoNumDims
+                      << "!=" << numExpectedDims << "]";
+        return res;
+    }
+
+    size_t i = infoNumDims - 1;
+
+    for (unsigned int expectedDimension : expectedDimensions)
+    {
+        if (info->dimension(i) != expectedDimension)
+        {
+            boost::test_tools::predicate_result res(false);
+            res.message() << "Different dimension [" << info->dimension(i) << "!=" << expectedDimension << "]";
+            return res;
+        }
+
+        i--;
+    }
+
+    return true;
+}
+
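+// Builds a small graph with two MemCopy layers bridging the reference backend and the backend
+// under test, then checks that both resulting CopyMemGeneric workloads see the expected
+// tensor handles.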
+template<typename IComputeTensorHandle>
+void CreateMemCopyWorkloads(IWorkloadFactory& factory)
+{
+    Graph graph;
+    RefWorkloadFactory refFactory;
+
+    // Creates the layers we're testing.
+    Layer* const layer1 = graph.AddLayer<MemCopyLayer>("layer1");
+    Layer* const layer2 = graph.AddLayer<MemCopyLayer>("layer2");
+
+    // Creates extra layers.
+    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+    // Connects up.
+    TensorInfo tensorInfo({2, 3}, DataType::Float32);
+    Connect(input, layer1, tensorInfo);
+    Connect(layer1, layer2, tensorInfo);
+    Connect(layer2, output, tensorInfo);
+
+    input->CreateTensorHandles(graph, refFactory);
+    layer1->CreateTensorHandles(graph, factory);
+    layer2->CreateTensorHandles(graph, refFactory);
+    output->CreateTensorHandles(graph, refFactory);
+
+    // Makes the workloads and checks them.
+    auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, graph, factory);
+    auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, graph, refFactory);
+
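+    // The first workload copies from a CPU (reference) tensor handle into the backend under test.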
+    MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData();
+    BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
+    BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
+    auto inputHandle1  = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor1.m_Inputs[0]);
+    auto outputHandle1 = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
+    BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
+    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));
+
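+    // The second workload copies from the backend under test back into a CPU tensor handle.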
+    MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
+    BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1);
+    BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
+    auto inputHandle2  = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
+    auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor2.m_Outputs[0]);
+    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
+    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
+}
+
+} //namespace
diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp
new file mode 100644
index 0000000..8ecdb10
--- /dev/null
+++ b/src/backends/aclCommon/test/MemCopyTests.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
+#include <backends/cl/ClWorkloadFactory.hpp>
+#include <backends/neon/NeonWorkloadFactory.hpp>
+#include <backends/test/MemCopyTestImpl.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(MemCopyTestSuite)
+
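+// Checks that converting ACL strides and shapes to armnn::TensorShape reverses the dimension order.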
+BOOST_AUTO_TEST_CASE(AclTypeConversions)
+{
+    arm_compute::Strides strides(1, 2, 3, 4);
+    armnn::TensorShape convertedStrides = armnn::armcomputetensorutils::GetStrides(strides);
+
+    BOOST_TEST(convertedStrides[0] == 4);
+    BOOST_TEST(convertedStrides[1] == 3);
+    BOOST_TEST(convertedStrides[2] == 2);
+    BOOST_TEST(convertedStrides[3] == 1);
+
+    arm_compute::TensorShape shape(5, 6, 7, 8);
+    armnn::TensorShape convertedShape = armnn::armcomputetensorutils::GetShape(shape);
+
+    BOOST_TEST(convertedShape[0] == 8);
+    BOOST_TEST(convertedShape[1] == 7);
+    BOOST_TEST(convertedShape[2] == 6);
+    BOOST_TEST(convertedShape[3] == 5);
+}
+
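+// The following tests copy tensors between the NEON and GPU (CL) backends, with and without
+// sub-tensors, and are only compiled when both backends are enabled.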
+#if ARMCOMPUTECL_ENABLED && ARMCOMPUTENEON_ENABLED
+
+BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu)
+{
+    LayerTestResult<float, 4> result = MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory>(false);
+    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+}
+
+BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon)
+{
+    LayerTestResult<float, 4> result = MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory>(false);
+    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+}
+
+BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors)
+{
+    LayerTestResult<float, 4> result = MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory>(true);
+    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+}
+
+BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors)
+{
+    LayerTestResult<float, 4> result = MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory>(true);
+    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+}
+
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()