IVGCVSW-3729 Added neon slice workload and supporting neon layer tests

* Support added for ACL neon slice workload
* Utility function created to translate ArmNN slice layer params to ACL neon slice layer equivalent
* Neon slice layer tests added as per SliceTestImpl.hpp

Signed-off-by: josh minor <josh.minor@arm.com>
Change-Id: Id583465311879af139e8e977f16ed2280c937ac7
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 8cf97d3..f63946e 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -76,4 +76,36 @@
     return std::make_tuple(starts, ends, strides);
 }
 
+/// Translates ArmNN Slice layer parameters into the (starts, ends) coordinate
+/// pair expected by the ACL NESlice workload.
+///
+/// @param m_begin Per-dimension start indices of the slice (ArmNN dimension order).
+/// @param m_size  Per-dimension extents of the slice; begin + size gives the exclusive end.
+/// @return std::tuple of arm_compute::Coordinates (starts, ends) in ACL dimension order.
+inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
+                             const std::vector<unsigned int>& m_size)
+{
+    // This function must translate the size vector given to an end vector
+    // expected by the ACL NESlice workload
+    arm_compute::Coordinates starts;
+    arm_compute::Coordinates ends;
+
+    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
+
+    // For strided slices, we have the relationship size = (end - begin) / stride
+    // For slice, we assume stride to be a vector of all ones, yielding the formula
+    // size = (end - begin) therefore we know end = size + begin
+    for (unsigned int i = 0; i < num_dims; i++)
+    {
+        // NOTE: dimensions are written in reverse order — ACL's Coordinates run in
+        // the opposite dimension order to ArmNN tensor shapes.
+        unsigned int revertedIndex = num_dims - i - 1;
+
+        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
+        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
+    }
+
+    return std::make_tuple(starts, ends);
+}
+
 } //namespace armnn