IVGCVSW-3728 Add CL workload for Slice
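
* Added ClSliceWorkload based on the arm_compute::CLSlice function
* Added the SetClSliceData() helper to ClWorkloadUtils to translate
  the descriptor's begin/size vectors into the start/end coordinates
  expected by ACL
* Added IsSliceSupported() to ClLayerSupport and CreateSlice() to
  ClWorkloadFactory
* Enabled the Slice layer tests for the CL backend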

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I2ed38744e1e8c839b369be8d44c0cffccfeb370e
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 7a1c573..3e6ccb4 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -49,6 +49,7 @@
 #include "workloads/ClRsqrtWorkload.hpp"
 #include "workloads/ClQuantizedLstmWorkload.hpp"
 #include "workloads/ClQuantizeWorkload.hpp"
+#include "workloads/ClSliceWorkload.hpp"
 #include "workloads/ClSoftmaxBaseWorkload.hpp"
 #include "workloads/ClSpaceToBatchNdWorkload.hpp"
 #include "workloads/ClSpaceToDepthWorkload.hpp"
@@ -692,6 +693,14 @@
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
 }
 
+bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const SliceDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
+}
+
 bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 219ce3b..a21589d 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -227,6 +227,11 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSliceSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          const SliceDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 2a7c8fe..8045805 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -458,6 +458,12 @@
     return MakeWorkload<ClRsqrtWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return MakeWorkload<ClSliceWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 6c3935d..dd01e61 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -185,6 +185,9 @@
     std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index c439186..4182b94 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -57,6 +57,7 @@
         workloads/ClReshapeWorkload.cpp \
         workloads/ClResizeWorkload.cpp \
         workloads/ClRsqrtWorkload.cpp \
+        workloads/ClSliceWorkload.cpp \
         workloads/ClSoftmaxBaseWorkload.cpp \
         workloads/ClSoftmaxFloatWorkload.cpp \
         workloads/ClSoftmaxUint8Workload.cpp \
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 7d1fb8b..6799435 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -562,6 +562,20 @@
 ARMNN_AUTO_TEST_CASE(StackOutput5D,        StackOutput5DFloat32Test)
 ARMNN_AUTO_TEST_CASE(StackFloat16,         StackFloat16Test)
 
+// Slice
+ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice3dFloat32, Slice3dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice2dFloat32, Slice2dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice1dFloat32, Slice1dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice4dUint8, Slice4dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice3dUint8, Slice3dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice2dUint8, Slice2dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice1dUint8, Slice1dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice4dInt16, Slice4dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
+
 // Strided Slice
 ARMNN_AUTO_TEST_CASE(StridedSlice4dFloat32, StridedSlice4dFloat32Test)
 ARMNN_AUTO_TEST_CASE(StridedSlice4dReverseFloat32, StridedSlice4dReverseFloat32Test)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index c844512..de62ca9 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -74,6 +74,8 @@
     ClResizeWorkload.hpp
     ClRsqrtWorkload.cpp
     ClRsqrtWorkload.hpp
+    ClSliceWorkload.cpp
+    ClSliceWorkload.hpp
     ClSoftmaxBaseWorkload.cpp
     ClSoftmaxBaseWorkload.hpp
     ClSoftmaxFloatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClSliceWorkload.cpp b/src/backends/cl/workloads/ClSliceWorkload.cpp
new file mode 100644
index 0000000..fa99e7f
--- /dev/null
+++ b/src/backends/cl/workloads/ClSliceWorkload.cpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClSliceWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status ClSliceWorkloadValidate(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const SliceDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    arm_compute::Coordinates starts;
+    arm_compute::Coordinates ends;
+
+    std::tie(starts, ends) = SetClSliceData(descriptor.m_Begin, descriptor.m_Size);
+
+    return arm_compute::CLSlice::validate(&aclInput, &aclOutput, starts, ends);
+}
+
+ClSliceWorkload::ClSliceWorkload(const SliceQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<SliceQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClSliceWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = boost::polymorphic_downcast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = boost::polymorphic_downcast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::Coordinates starts;
+    arm_compute::Coordinates ends;
+
+    std::tie(starts, ends) = SetClSliceData(m_Data.m_Parameters.m_Begin, m_Data.m_Parameters.m_Size);
+
+    m_SliceFunction.configure(&input, &output, starts, ends);
+}
+
+void ClSliceWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClSliceWorkload_Execute");
+    RunClFunction(m_SliceFunction, CHECK_LOCATION());
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClSliceWorkload.hpp b/src/backends/cl/workloads/ClSliceWorkload.hpp
new file mode 100644
index 0000000..3460b77
--- /dev/null
+++ b/src/backends/cl/workloads/ClSliceWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLSlice.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClSliceWorkloadValidate(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const SliceDescriptor& descriptor);
+
+class ClSliceWorkload : public BaseWorkload<SliceQueueDescriptor>
+{
+public:
+    ClSliceWorkload(const SliceQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::CLSlice m_SliceFunction;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 32dacdf..f5e60e6 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -60,6 +60,34 @@
     return std::make_tuple(starts, ends, strides);
 }
 
+inline auto SetClSliceData(const std::vector<unsigned int>& begin,
+                           const std::vector<unsigned int>& size)
+{
+    // This function translates the given begin/size vectors into the
+    // start/end coordinates expected by the ACL CLSlice function
+    arm_compute::Coordinates starts;
+    arm_compute::Coordinates ends;
+
+    unsigned int num_dims = static_cast<unsigned int>(begin.size());
+
+    // For strided slices, size relates to begin and end via size = (end - begin) / stride.
+    // Slice assumes a stride of one along every dimension, yielding the formula
+    // size = end - begin, and therefore end = begin + size
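+    //
+    // For example (hypothetical values): begin = { 1, 0 } and size = { 2, 3 }
+    // yield starts = (0, 1) and ends = (3, 3) in ACL's reversed dimension order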
+    for (unsigned int i = 0; i < num_dims; i++)
+    {
+        // ACL expects the coordinates in reverse order with respect to armnn
+        unsigned int reversedIndex = num_dims - i - 1;
+
+        starts.set(i, static_cast<int>(begin[reversedIndex]));
+        ends.set(i, static_cast<int>(begin[reversedIndex] + size[reversedIndex]));
+    }
+
+    return std::make_tuple(starts, ends);
+}
+
 inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
                                              const ConstCpuTensorHandle* handle)
 {
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index dd8c699..014dc3f 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -37,6 +37,7 @@
 #include "ClReshapeWorkload.hpp"
 #include "ClResizeWorkload.hpp"
 #include "ClRsqrtWorkload.hpp"
+#include "ClSliceWorkload.hpp"
 #include "ClSoftmaxFloatWorkload.hpp"
 #include "ClSoftmaxUint8Workload.hpp"
 #include "ClSpaceToBatchNdWorkload.hpp"