IVGCVSW-3878 Add reference workload for SLICE

* Added reference workload implementation and layer tests
  for all supported tensor dimensions (1d, 2d, 3d, 4d)

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I40eb300828933e9183027281105d1a7e597d1569
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 228f8a8..572f617 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1374,6 +1374,33 @@
     return supported;
 }
 
+bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const SliceDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    bool supported = true;
+
+    std::array<DataType, 3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference Slice: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference Slice: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference Slice: input and output types are mismatched");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 26c60dc..8200058 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -219,6 +219,11 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSliceSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          const SliceDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 52dffcc..055c8da 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -457,4 +457,10 @@
     return std::make_unique<RefAbsWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    return std::make_unique<RefSliceWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 5851528..2c40053 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -212,6 +212,9 @@
     std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index b1f0a03..b2ec748 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -69,6 +69,7 @@
         workloads/RefResizeBilinearWorkload.cpp \
         workloads/RefResizeWorkload.cpp \
         workloads/RefRsqrtWorkload.cpp \
+        workloads/RefSliceWorkload.cpp \
         workloads/RefSoftmaxWorkload.cpp \
         workloads/RefSpaceToBatchNdWorkload.cpp \
         workloads/RefSpaceToDepthWorkload.cpp \
@@ -78,6 +79,7 @@
         workloads/RefTransposeConvolution2dWorkload.cpp \
         workloads/Resize.cpp \
         workloads/Rsqrt.cpp \
+        workloads/Slice.cpp \
         workloads/SpaceToBatchNd.cpp \
         workloads/SpaceToDepth.cpp \
         workloads/Stack.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index eb56dde..afeb8a4 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1252,6 +1252,22 @@
 ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QuantisedAsymm8>)
 ARMNN_AUTO_TEST_CASE(PreluInt16,   PreluTest<DataType::QuantisedSymm16>)
 
+// Slice
+ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice3dFloat32, Slice3dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice2dFloat32, Slice2dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice1dFloat32, Slice1dFloat32Test)
+
+ARMNN_AUTO_TEST_CASE(Slice4dUint8, Slice4dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice3dUint8, Slice3dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice2dUint8, Slice2dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice1dUint8, Slice1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(Slice4dInt16, Slice4dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
+
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
                      SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 23d6024..3077095 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -107,6 +107,8 @@
     RefResizeWorkload.hpp
     RefRsqrtWorkload.cpp
     RefRsqrtWorkload.hpp
+    RefSliceWorkload.cpp
+    RefSliceWorkload.hpp
     RefSoftmaxWorkload.cpp
     RefSoftmaxWorkload.hpp
     RefSpaceToBatchNdWorkload.cpp
@@ -127,6 +129,8 @@
     Resize.hpp
     Rsqrt.cpp
     Rsqrt.hpp
+    Slice.cpp
+    Slice.hpp
     Softmax.cpp
     Softmax.hpp
     SpaceToBatchNd.hpp
diff --git a/src/backends/reference/workloads/RefSliceWorkload.cpp b/src/backends/reference/workloads/RefSliceWorkload.cpp
new file mode 100644
index 0000000..2e44845
--- /dev/null
+++ b/src/backends/reference/workloads/RefSliceWorkload.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSliceWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "Slice.hpp"
+
+#include <Profiling.hpp>
+
+namespace armnn
+{
+
+void RefSliceWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSliceWorkload_Execute");
+
+    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    Slice(inputInfo,
+          m_Data.m_Parameters,
+          m_Data.m_Inputs[0]->Map(),
+          m_Data.m_Outputs[0]->Map(),
+          GetDataTypeSize(inputInfo.GetDataType()));
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefSliceWorkload.hpp b/src/backends/reference/workloads/RefSliceWorkload.hpp
new file mode 100644
index 0000000..006c7b7
--- /dev/null
+++ b/src/backends/reference/workloads/RefSliceWorkload.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefSliceWorkload : public BaseWorkload<SliceQueueDescriptor>
+{
+public:
+    using BaseWorkload<SliceQueueDescriptor>::BaseWorkload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 1ec349e..959226a 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -41,10 +41,11 @@
 #include "RefPadWorkload.hpp"
 #include "RefPreluWorkload.hpp"
 #include "RefQuantizeWorkload.hpp"
+#include "RefReshapeWorkload.hpp"
 #include "RefResizeBilinearWorkload.hpp"
 #include "RefResizeWorkload.hpp"
 #include "RefRsqrtWorkload.hpp"
-#include "RefReshapeWorkload.hpp"
+#include "RefSliceWorkload.hpp"
 #include "RefSplitterWorkload.hpp"
 #include "RefSoftmaxWorkload.hpp"
 #include "RefSpaceToBatchNdWorkload.hpp"
@@ -56,4 +57,4 @@
 #include "Resize.hpp"
 #include "Softmax.hpp"
 #include "Splitter.hpp"
-#include "TensorBufferArrayView.hpp"
\ No newline at end of file
+#include "TensorBufferArrayView.hpp"
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
new file mode 100644
index 0000000..c7ca3b1
--- /dev/null
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -0,0 +1,95 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Slice.hpp"
+
+#include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
+#include <cstring>
+
+namespace armnn
+{
+
+void Slice(const TensorInfo& inputInfo,
+           const SliceDescriptor& descriptor,
+           const void* inputData,
+           void* outputData,
+           unsigned int dataTypeSize)
+{
+    const TensorShape& inputShape = inputInfo.GetShape();
+    const unsigned int numDims    = inputShape.GetNumDimensions();
+
+    BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
+    BOOST_ASSERT(descriptor.m_Size.size()  == numDims);
+
+    constexpr unsigned int maxNumDims = 4;
+    BOOST_ASSERT(numDims <= maxNumDims);
+
+    std::vector<unsigned int> paddedInput(4);
+    std::vector<unsigned int> paddedBegin(4);
+    std::vector<unsigned int> paddedSize (4);
+
+    const unsigned int numPaddingDims = maxNumDims - numDims;
+    for (unsigned int i = 0u; i < maxNumDims; ++i)
+    {
+        if (i < numPaddingDims)
+        {
+            paddedInput[i] = 1u;
+            paddedBegin[i] = 0u;
+            paddedSize[i]  = 1u;
+        }
+        else
+        {
+            const unsigned int j = i - numPaddingDims;
+            paddedInput[i] = inputShape[j];
+            paddedBegin[i] = descriptor.m_Begin[j];
+            paddedSize[i]  = descriptor.m_Size[j];
+        }
+    }
+
+    unsigned int dim0 = paddedInput[0];
+    unsigned int dim1 = paddedInput[1];
+    unsigned int dim2 = paddedInput[2];
+    unsigned int dim3 = paddedInput[3];
+
+    unsigned int begin0 = paddedBegin[0];
+    unsigned int begin1 = paddedBegin[1];
+    unsigned int begin2 = paddedBegin[2];
+    unsigned int begin3 = paddedBegin[3];
+
+    unsigned int size0  = paddedSize[0];
+    unsigned int size1  = paddedSize[1];
+    unsigned int size2  = paddedSize[2];
+    unsigned int size3  = paddedSize[3];
+
+    BOOST_ASSERT(begin0 + size0 <= dim0);
+    BOOST_ASSERT(begin1 + size1 <= dim1);
+    BOOST_ASSERT(begin2 + size2 <= dim2);
+    BOOST_ASSERT(begin3 + size3 <= dim3);
+
+    const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
+    unsigned char* output      = reinterpret_cast<unsigned char*>(outputData);
+
+    boost::ignore_unused(dim0);
+    for (unsigned int idx0 = begin0; idx0 < begin0 + size0; ++idx0)
+    {
+        for (unsigned int idx1 = begin1; idx1 < begin1 + size1; ++idx1)
+        {
+            for (unsigned int idx2 = begin2; idx2 < begin2 + size2; ++idx2)
+            {
+                for (unsigned int idx3 = begin3; idx3 < begin3 + size3; ++idx3)
+                {
+                    const unsigned int inputOffset =
+                        (((idx0 * dim1 + idx1) * dim2 + idx2) * dim3 + idx3) * dataTypeSize;
+
+                    ::memcpy(output, input + inputOffset, dataTypeSize);
+                    output += dataTypeSize;
+                }
+            }
+        }
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/Slice.hpp b/src/backends/reference/workloads/Slice.hpp
new file mode 100644
index 0000000..823f16c
--- /dev/null
+++ b/src/backends/reference/workloads/Slice.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn
+{
+
+void Slice(const TensorInfo& inputInfo,
+           const SliceDescriptor& descriptor,
+           const void* inputData,
+           void* outputData,
+           unsigned int dataTypeSize);
+
+} // namespace armnn