IVGCVSW-3539 Add CL support and tests for Stack

* Added CL backend support for Stack
* Added unit tests for Stack on the CL backend
* Refactored Stack unit tests to be templated on the data type

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: I38ee3e9d8947ea98a3104c982698001e704d7d89
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 619c6d0..98cdfaf 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -1381,17 +1381,16 @@
     return workload;
 }
 
-template <typename StackWorkload>
+template <typename StackWorkload, armnn::DataType DataType>
 std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory& factory,
                                                        armnn::Graph& graph,
                                                        const armnn::TensorShape& inputShape,
                                                        const armnn::TensorShape& outputShape,
                                                        unsigned int axis,
-                                                       unsigned int numInputs,
-                                                       armnn::DataType dataType)
+                                                       unsigned int numInputs)
 {
-    armnn::TensorInfo inputTensorInfo(inputShape, dataType);
-    armnn::TensorInfo outputTensorInfo(outputShape, dataType);
+    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
+    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
 
     // Constructs the Stack layer.
     armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 6d03da7..bd7f1c6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -193,7 +193,7 @@
                                                       const WorkloadInfo&            info) const;
 
     virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& Info) const;
+                                                   const WorkloadInfo& info) const;
 
     virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& Info) const;
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index b737daf..0553962 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -46,6 +46,7 @@
 #include "workloads/ClSpaceToBatchNdWorkload.hpp"
 #include "workloads/ClSpaceToDepthWorkload.hpp"
 #include "workloads/ClSplitterWorkload.hpp"
+#include "workloads/ClStackWorkload.hpp"
 #include "workloads/ClStridedSliceWorkload.hpp"
 #include "workloads/ClSubtractionWorkload.hpp"
 #include "workloads/ClTransposeConvolution2dWorkload.hpp"
@@ -671,6 +672,18 @@
     return true;
 }
 
+bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+                                      const TensorInfo& output,
+                                      const StackDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   inputs,
+                                   output,
+                                   descriptor);
+}
+
 bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 63a4daf..4879e8b 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -218,6 +218,11 @@
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+                          const TensorInfo& output,
+                          const StackDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 6ce87d8..4a593aa 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -433,4 +433,10 @@
     return MakeWorkload<ClSpaceToDepthWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return MakeWorkload<ClStackWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 3b0ac82..8586435 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -182,6 +182,9 @@
     std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
 private:
     template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
     static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 57d7cb9..ee6447f 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -55,6 +55,7 @@
         workloads/ClSpaceToBatchNdWorkload.cpp \
         workloads/ClSpaceToDepthWorkload.cpp \
         workloads/ClSplitterWorkload.cpp \
+        workloads/ClStackWorkload.cpp \
         workloads/ClStridedSliceWorkload.cpp \
         workloads/ClSubtractionWorkload.cpp \
         workloads/ClTransposeConvolution2dWorkload.cpp
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index de13390..f453ccc 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -936,4 +936,42 @@
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
 }
 
+template <armnn::DataType DataType>
+static void ClCreateStackWorkloadTest(const std::initializer_list<unsigned int>& inputShape,
+                                      const std::initializer_list<unsigned int>& outputShape,
+                                      unsigned int axis,
+                                      unsigned int numInputs)
+{
+    armnn::Graph graph;
+    ClWorkloadFactory factory =
+            ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
+
+    auto workload = CreateStackWorkloadTest<ClStackWorkload, DataType>(factory,
+                                                                       graph,
+                                                                       TensorShape(inputShape),
+                                                                       TensorShape(outputShape),
+                                                                       axis,
+                                                                       numInputs);
+
+    // Check inputs and output are as expected
+    StackQueueDescriptor queueDescriptor = workload->GetData();
+    for (unsigned int i = 0; i < numInputs; ++i)
+    {
+        auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
+        BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+    }
+    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+}
+
+BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+{
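+    // Stacking two { 3, 4, 5 } inputs along axis 2 is expected to give a { 3, 4, 2, 5 } output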
+    ClCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+}
+
+BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+{
+    ClCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 37af471..dd4c16e 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -496,6 +496,13 @@
 ARMNN_AUTO_TEST_CASE(SpaceToDepthNHWCQSymm16, SpaceToDepthNHWCQSymm16Test)
 ARMNN_AUTO_TEST_CASE(SpaceToDepthNCHWQSymm16, SpaceToDepthNCHWQSymm16Test)
 
+// Stack
+ARMNN_AUTO_TEST_CASE(Stack0Axis,               Stack0AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack4dOutput1Axis,       Stack4dOutput1AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack4dOutput2Axis,       Stack4dOutput2AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack4dOutput3Axis,       Stack4dOutput3AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack3dOutput1Axis3Input, Stack3dOutput1Axis3InputTest<armnn::DataType::Float32>)
+
 // Strided Slice
 ARMNN_AUTO_TEST_CASE(StridedSlice4DFloat32, StridedSlice4DFloat32Test)
 ARMNN_AUTO_TEST_CASE(StridedSlice4DReverseFloat32, StridedSlice4DReverseFloat32Test)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 2a3b1ad..49a8b17 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -74,6 +74,8 @@
     ClSpaceToDepthWorkload.hpp
     ClSplitterWorkload.cpp
     ClSplitterWorkload.hpp
+    ClStackWorkload.cpp
+    ClStackWorkload.hpp
     ClStridedSliceWorkload.cpp
     ClStridedSliceWorkload.hpp
     ClSubtractionWorkload.cpp
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
new file mode 100644
index 0000000..3ba698e
--- /dev/null
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ClStackWorkload.hpp"
+#include "ClWorkloadUtils.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <cl/ClTensorHandle.hpp>
+#include <cl/ClLayerSupport.hpp>
+
+#include <arm_compute/core/Types.h>
+
+#include <boost/numeric/conversion/cast.hpp>
+#include <boost/polymorphic_pointer_cast.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+namespace
+{
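+// ArmNN counts tensor axes from the outermost dimension, whereas ACL counts from the
+// innermost, so the stack axis must be reversed before it is passed to CLStackLayer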
+int CalcAxis(const unsigned int axis, const unsigned int inputDimensions)
+{
+    const int intAxis = boost::numeric_cast<int>(axis);
+    return boost::numeric_cast<int>(inputDimensions) - intAxis;
+}
+} //namespace
+
+arm_compute::Status ClStackWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+                                            const TensorInfo& output,
+                                            const StackDescriptor& descriptor)
+{
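+    // All inputs to a Stack layer are constrained to the shape given in the descriptor,
+    // so a single TensorInfo can be reused while collecting the ACL input pointers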
+    std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
+    arm_compute::TensorInfo aclInputInfo;
+    for (const TensorInfo* input : inputs)
+    {
+        aclInputInfo = BuildArmComputeTensorInfo(*input);
+        aclInputPtrs.emplace_back(&aclInputInfo);
+    }
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    int aclAxis = CalcAxis(descriptor.m_Axis, descriptor.m_InputShape.GetNumDimensions());
+
+    return arm_compute::CLStackLayer::validate(aclInputPtrs, aclAxis, &aclOutputInfo);
+}
+
+ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor, const WorkloadInfo& info)
+: BaseWorkload<StackQueueDescriptor>(descriptor, info)
+{
+    std::vector<arm_compute::ICLTensor*> aclInputs;
+    for (auto input : m_Data.m_Inputs)
+    {
+        arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+        aclInputs.emplace_back(&aclInput);
+    }
+    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+                                                                         m_Data.m_Outputs[0])->GetTensor();
+
+    m_Layer.reset(new arm_compute::CLStackLayer());
+    int aclAxis = CalcAxis(descriptor.m_Parameters.m_Axis, descriptor.m_Parameters.m_InputShape.GetNumDimensions());
+    m_Layer->configure(aclInputs, aclAxis, &output);
+}
+
+void ClStackWorkload::Execute() const
+{
+    if (m_Layer)
+    {
+        ARMNN_SCOPED_PROFILING_EVENT_CL("ClStackWorkload_Execute");
+        m_Layer->run();
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClStackWorkload.hpp b/src/backends/cl/workloads/ClStackWorkload.hpp
new file mode 100644
index 0000000..7500869
--- /dev/null
+++ b/src/backends/cl/workloads/ClStackWorkload.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+namespace armnn
+{
+arm_compute::Status ClStackWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+                                            const TensorInfo& output,
+                                            const StackDescriptor& descriptor);
+
+class ClStackWorkload : public BaseWorkload<StackQueueDescriptor>
+{
+public:
+    ClStackWorkload(const StackQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    void Execute() const override;
+
+private:
+    mutable std::unique_ptr<arm_compute::CLStackLayer> m_Layer;
+};
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index a64dea2..03dffc4 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -36,6 +36,7 @@
 #include "ClSpaceToBatchNdWorkload.hpp"
 #include "ClSpaceToDepthWorkload.hpp"
 #include "ClSplitterWorkload.hpp"
+#include "ClStackWorkload.hpp"
 #include "ClStridedSliceWorkload.hpp"
 #include "ClSubtractionWorkload.hpp"
 #include "ClConvertFp16ToFp32Workload.hpp"
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index f7999d0..04c9acb 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -990,41 +990,45 @@
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
 }
 
+template <armnn::DataType DataType>
 static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
                                        const armnn::TensorShape& outputShape,
                                        unsigned int axis,
-                                       unsigned int numInputs,
-                                       armnn::DataType dataType)
+                                       unsigned int numInputs)
 {
     armnn::Graph graph;
     RefWorkloadFactory factory;
-    auto workload = CreateStackWorkloadTest<RefStackWorkload>(factory,
-                                                              graph,
-                                                              inputShape,
-                                                              outputShape,
-                                                              axis,
-                                                              numInputs,
-                                                              dataType);
+    auto workload = CreateStackWorkloadTest<RefStackWorkload, DataType>(factory,
+                                                                        graph,
+                                                                        inputShape,
+                                                                        outputShape,
+                                                                        axis,
+                                                                        numInputs);
 
-    // Check output is as expected
-    auto queueDescriptor = workload->GetData();
+    // Check inputs and output are as expected
+    StackQueueDescriptor queueDescriptor = workload->GetData();
+    for (unsigned int i = 0; i < numInputs; ++i)
+    {
+        auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
+        BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
+    }
     auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
+    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
 }
 
 BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
 {
-    RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2, armnn::DataType::Float32);
+    RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
 {
-    RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2, armnn::DataType::QuantisedAsymm8);
+    RefCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
 {
-    RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2, armnn::DataType::QuantisedSymm16);
+    RefCreateStackWorkloadTest<armnn::DataType::QuantisedSymm16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 BOOST_AUTO_TEST_SUITE_END()