IVGCVSW-3875 Add frontend for SLICE layer

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Iebe675a0cee02db6f133d48ce58cbc1e233061db
diff --git a/Android.mk b/Android.mk
index 3640e0c..89956dd 100644
--- a/Android.mk
+++ b/Android.mk
@@ -157,9 +157,10 @@
         src/armnn/layers/ReshapeLayer.cpp \
         src/armnn/layers/ResizeLayer.cpp \
         src/armnn/layers/RsqrtLayer.cpp \
+        src/armnn/layers/SliceLayer.cpp \
+        src/armnn/layers/SoftmaxLayer.cpp \
         src/armnn/layers/SpaceToBatchNdLayer.cpp \
         src/armnn/layers/SpaceToDepthLayer.cpp \
-        src/armnn/layers/SoftmaxLayer.cpp \
         src/armnn/layers/SplitterLayer.cpp \
         src/armnn/layers/StackLayer.cpp \
         src/armnn/layers/StridedSliceLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ef79ee1..a04f30b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -319,6 +319,8 @@
     src/armnn/layers/ResizeLayer.cpp
     src/armnn/layers/RsqrtLayer.cpp
     src/armnn/layers/RsqrtLayer.hpp
+    src/armnn/layers/SliceLayer.cpp
+    src/armnn/layers/SliceLayer.hpp
     src/armnn/layers/SoftmaxLayer.hpp
     src/armnn/layers/SoftmaxLayer.cpp
     src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -846,4 +848,3 @@
         )
 
 endif()
-
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index e871e89..8d382f7 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -667,6 +667,24 @@
     float m_PadValue;
 };
 
+/// A SliceDescriptor for the SliceLayer.
+struct SliceDescriptor
+{
+    SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
+        : m_Begin(begin)
+        , m_Size(size)
+    {}
+
+    SliceDescriptor() : SliceDescriptor({}, {})
+    {}
+
+    /// Beginning indices of the slice in each dimension.
+    std::vector<unsigned int> m_Begin;
+
+    /// Size of the slice in each dimension.
+    std::vector<unsigned int> m_Size;
+};
+
 /// A StackDescriptor for the StackLayer.
 struct StackDescriptor
 {
@@ -786,4 +804,4 @@
     DataLayout m_DataLayout;
 };
 
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 8f81b4f..bddb0ca 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -31,6 +31,7 @@
+struct SliceDescriptor;
 struct SoftmaxDescriptor;
 struct SpaceToBatchNdDescriptor;
 struct SpaceToDepthDescriptor;
 struct StackDescriptor;
 struct StridedSliceDescriptor;
 struct TransposeConvolution2dDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index d168226..cab2df1 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -269,6 +269,11 @@
                                   const TensorInfo& output,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsSliceSupported(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const SliceDescriptor& descriptor,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsSoftmaxSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const SoftmaxDescriptor& descriptor,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index a504a41..6c09773 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -357,6 +357,13 @@
     virtual void VisitRsqrtLayer(const IConnectableLayer* layer,
                                  const char* name = nullptr) = 0;

+    /// Function that a slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
+    /// @param name - Optional name for the layer.
+    virtual void VisitSliceLayer(const IConnectableLayer* layer,
+                                 const SliceDescriptor& sliceDescriptor,
+                                 const char* name = nullptr) = 0;
 
     /// Function that a softmax layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index cd1b7a6..09026ad 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -237,6 +237,12 @@
     virtual IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
         const char* name = nullptr) = 0;
 
+    /// Adds a slice layer to the network.
+    /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr) = 0;
+
     /// Adds a softmax layer to the network.
     /// If the data type is QAsymm8, then the output quantization parameters
     /// must have a scale of 1/256 and an offset of 0
@@ -253,8 +259,8 @@
     ///                             the first output, second view to the second output, etc....
     /// @param name - Optional name for the layer.
     /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor
-        , const char* name = nullptr) = 0;
+    virtual IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
+                                                const char* name = nullptr) = 0;
 
     /// Adds a merge layer to the network.
     /// @param name - Optional name for the layer.
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 0739b43..d626c71 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -182,6 +182,10 @@
     void VisitRsqrtLayer(const IConnectableLayer*,
                          const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitSliceLayer(const IConnectableLayer*,
+                         const SliceDescriptor&,
+                         const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitSoftmaxLayer(const IConnectableLayer*,
                            const SoftmaxDescriptor&,
                            const char*) override { DefaultPolicy::Apply(__func__); }
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 98308f9..1e05fff 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -58,6 +58,7 @@
     Reshape,
     Resize,
     Rsqrt,
+    Slice,
     Softmax,
     SpaceToBatchNd,
     SpaceToDepth,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 6e4cf6a..a98c104 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -50,6 +50,7 @@
 #include "layers/ReshapeLayer.hpp"
 #include "layers/ResizeLayer.hpp"
 #include "layers/RsqrtLayer.hpp"
+#include "layers/SliceLayer.hpp"
 #include "layers/SoftmaxLayer.hpp"
 #include "layers/SpaceToBatchNdLayer.hpp"
 #include "layers/SpaceToDepthLayer.hpp"
@@ -131,6 +132,7 @@
 DECLARE_LAYER(Reshape)
 DECLARE_LAYER(Resize)
 DECLARE_LAYER(Rsqrt)
+DECLARE_LAYER(Slice)
 DECLARE_LAYER(Softmax)
 DECLARE_LAYER(SpaceToBatchNd)
 DECLARE_LAYER(SpaceToDepth)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 6971cb8..c055407 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1129,6 +1129,11 @@
     return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
 }
 
+IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
+{
+    return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
+}
+
 IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
     const char* name)
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index aac875a..274cc1a 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -117,6 +117,8 @@
     IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
         const char* name = nullptr) override;
 
+    IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr) override;
+
     IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
         const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
new file mode 100644
index 0000000..8ea5fd8
--- /dev/null
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -0,0 +1,66 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SliceLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
+namespace armnn
+{
+
+SliceLayer::SliceLayer(const SliceDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::Slice, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> SliceLayer::CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const
+{
+    SliceQueueDescriptor descriptor;
+    return factory.CreateSlice(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+SliceLayer* SliceLayer::Clone(Graph& graph) const
+{
+    return CloneBase<SliceLayer>(graph, m_Param, GetName());
+}
+
+void SliceLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+            "SliceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+            GetOutputSlot(0).GetTensorInfo().GetShape(),
+            inferredShapes[0]);
+}
+
+std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 1);
+
+    TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
+
+    return std::vector<TensorShape>({ outputShape });
+}
+
+void SliceLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitSliceLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
new file mode 100644
index 0000000..38f0747
--- /dev/null
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class SliceLayer : public LayerWithParameters<SliceDescriptor>
+{
+public:
+    /// Makes a workload for the Slice type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    SliceLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref SliceLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a SliceLayer.
+    /// @param [in] param SliceDescriptor to configure the slice operation.
+    /// @param [in] name Optional name for the layer.
+    SliceLayer(const SliceDescriptor& param, const char* name);
+
+    /// Default destructor.
+    ~SliceLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 4bb9614..c4c4a47 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -10,6 +10,7 @@
 
 BOOST_AUTO_TEST_SUITE(TestNameOnlyLayerVisitor)
 
+// Addition
 BOOST_AUTO_TEST_CASE(CheckAdditionLayerVisitorName)
 {
     TestAdditionLayerVisitor visitor("AdditionLayer");
@@ -28,42 +29,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckMultiplicationLayerVisitorName)
-{
-    TestMultiplicationLayerVisitor visitor("MultiplicationLayer");
-    Network net;
-
-    IConnectableLayer *const layer = net.AddMultiplicationLayer("MultiplicationLayer");
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckMultiplicationLayerVisitorNameNullptr)
-{
-    TestMultiplicationLayerVisitor visitor;
-    Network net;
-
-    IConnectableLayer *const layer = net.AddMultiplicationLayer();
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckFloorLayerVisitorName)
-{
-    TestFloorLayerVisitor visitor("FloorLayer");
-    Network net;
-
-    IConnectableLayer *const layer = net.AddFloorLayer("FloorLayer");
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckFloorLayerVisitorNameNullptr)
-{
-    TestFloorLayerVisitor visitor;
-    Network net;
-
-    IConnectableLayer *const layer = net.AddFloorLayer();
-    layer->Accept(visitor);
-}
-
+// Division
 BOOST_AUTO_TEST_CASE(CheckDivisionLayerVisitorName)
 {
     TestDivisionLayerVisitor visitor("DivisionLayer");
@@ -82,78 +48,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckSubtractionLayerVisitorName)
-{
-    TestSubtractionLayerVisitor visitor("SubtractionLayer");
-    Network net;
-
-    IConnectableLayer *const layer = net.AddSubtractionLayer("SubtractionLayer");
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckSubtractionLayerVisitorNameNullptr)
-{
-    TestSubtractionLayerVisitor visitor;
-    Network net;
-
-    IConnectableLayer *const layer = net.AddSubtractionLayer();
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckMaximumLayerVisitorName)
-{
-    TestMaximumLayerVisitor visitor("MaximumLayer");
-    Network net;
-
-    IConnectableLayer *const layer = net.AddMaximumLayer("MaximumLayer");
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckMaximumLayerVisitorNameNullptr)
-{
-    TestMaximumLayerVisitor visitor;
-    Network net;
-
-    IConnectableLayer *const layer = net.AddMaximumLayer();
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckMinimumLayerVisitorName)
-{
-    TestMinimumLayerVisitor visitor("MinimumLayer");
-    Network net;
-
-    IConnectableLayer *const layer = net.AddMinimumLayer("MinimumLayer");
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckMinimumLayerVisitorNameNullptr)
-{
-    TestMinimumLayerVisitor visitor;
-    Network net;
-
-    IConnectableLayer *const layer = net.AddMinimumLayer();
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckGreaterLayerVisitorName)
-{
-    TestGreaterLayerVisitor visitor("GreaterLayer");
-    Network net;
-
-    IConnectableLayer *const layer = net.AddGreaterLayer("GreaterLayer");
-    layer->Accept(visitor);
-}
-
-BOOST_AUTO_TEST_CASE(CheckGreaterLayerVisitorNameNullptr)
-{
-    TestGreaterLayerVisitor visitor;
-    Network net;
-
-    IConnectableLayer *const layer = net.AddGreaterLayer();
-    layer->Accept(visitor);
-}
-
+// Equal
 BOOST_AUTO_TEST_CASE(CheckEqualLayerVisitorName)
 {
     TestEqualLayerVisitor visitor("EqualLayer");
@@ -172,24 +67,26 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckRsqrtLayerVisitorName)
+// Floor
+BOOST_AUTO_TEST_CASE(CheckFloorLayerVisitorName)
 {
-    TestRsqrtLayerVisitor visitor("RsqrtLayer");
+    TestFloorLayerVisitor visitor("FloorLayer");
     Network net;
 
-    IConnectableLayer *const layer = net.AddRsqrtLayer("RsqrtLayer");
+    IConnectableLayer *const layer = net.AddFloorLayer("FloorLayer");
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckRsqrtLayerVisitorNameNullptr)
+BOOST_AUTO_TEST_CASE(CheckFloorLayerVisitorNameNullptr)
 {
-    TestRsqrtLayerVisitor visitor;
+    TestFloorLayerVisitor visitor;
     Network net;
 
-    IConnectableLayer *const layer = net.AddRsqrtLayer();
+    IConnectableLayer *const layer = net.AddFloorLayer();
     layer->Accept(visitor);
 }
 
+// Gather
 BOOST_AUTO_TEST_CASE(CheckGatherLayerVisitorName)
 {
     TestGatherLayerVisitor visitor("GatherLayer");
@@ -208,6 +105,139 @@
     layer->Accept(visitor);
 }
 
+// Greater
+BOOST_AUTO_TEST_CASE(CheckGreaterLayerVisitorName)
+{
+    TestGreaterLayerVisitor visitor("GreaterLayer");
+    Network net;
+
+    IConnectableLayer *const layer = net.AddGreaterLayer("GreaterLayer");
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckGreaterLayerVisitorNameNullptr)
+{
+    TestGreaterLayerVisitor visitor;
+    Network net;
+
+    IConnectableLayer *const layer = net.AddGreaterLayer();
+    layer->Accept(visitor);
+}
+
+// Maximum
+BOOST_AUTO_TEST_CASE(CheckMaximumLayerVisitorName)
+{
+    TestMaximumLayerVisitor visitor("MaximumLayer");
+    Network net;
+
+    IConnectableLayer *const layer = net.AddMaximumLayer("MaximumLayer");
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckMaximumLayerVisitorNameNullptr)
+{
+    TestMaximumLayerVisitor visitor;
+    Network net;
+
+    IConnectableLayer *const layer = net.AddMaximumLayer();
+    layer->Accept(visitor);
+}
+
+// Minimum
+BOOST_AUTO_TEST_CASE(CheckMinimumLayerVisitorName)
+{
+    TestMinimumLayerVisitor visitor("MinimumLayer");
+    Network net;
+
+    IConnectableLayer *const layer = net.AddMinimumLayer("MinimumLayer");
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckMinimumLayerVisitorNameNullptr)
+{
+    TestMinimumLayerVisitor visitor;
+    Network net;
+
+    IConnectableLayer *const layer = net.AddMinimumLayer();
+    layer->Accept(visitor);
+}
+
+// Multiplication
+BOOST_AUTO_TEST_CASE(CheckMultiplicationLayerVisitorName)
+{
+    TestMultiplicationLayerVisitor visitor("MultiplicationLayer");
+    Network net;
+
+    IConnectableLayer *const layer = net.AddMultiplicationLayer("MultiplicationLayer");
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckMultiplicationLayerVisitorNameNullptr)
+{
+    TestMultiplicationLayerVisitor visitor;
+    Network net;
+
+    IConnectableLayer *const layer = net.AddMultiplicationLayer();
+    layer->Accept(visitor);
+}
+
+// Rsqrt
+BOOST_AUTO_TEST_CASE(CheckRsqrtLayerVisitorName)
+{
+    TestRsqrtLayerVisitor visitor("RsqrtLayer");
+    Network net;
+
+    IConnectableLayer *const layer = net.AddRsqrtLayer("RsqrtLayer");
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckRsqrtLayerVisitorNameNullptr)
+{
+    TestRsqrtLayerVisitor visitor;
+    Network net;
+
+    IConnectableLayer *const layer = net.AddRsqrtLayer();
+    layer->Accept(visitor);
+}
+
+// Slice
+BOOST_AUTO_TEST_CASE(CheckSliceLayerVisitorName)
+{
+    TestSliceLayerVisitor visitor("SliceLayer");
+    Network net;
+
+    IConnectableLayer *const layer = net.AddSliceLayer(SliceDescriptor(), "SliceLayer");
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckSliceLayerVisitorNameNullptr)
+{
+    TestSliceLayerVisitor visitor;
+    Network net;
+
+    IConnectableLayer *const layer = net.AddSliceLayer(SliceDescriptor());
+    layer->Accept(visitor);
+}
+
+// Subtraction
+BOOST_AUTO_TEST_CASE(CheckSubtractionLayerVisitorName)
+{
+    TestSubtractionLayerVisitor visitor("SubtractionLayer");
+    Network net;
+
+    IConnectableLayer *const layer = net.AddSubtractionLayer("SubtractionLayer");
+    layer->Accept(visitor);
+}
+
+BOOST_AUTO_TEST_CASE(CheckSubtractionLayerVisitorNameNullptr)
+{
+    TestSubtractionLayerVisitor visitor;
+    Network net;
+
+    IConnectableLayer *const layer = net.AddSubtractionLayer();
+    layer->Accept(visitor);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
 
-} //namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.hpp b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
index c0037ae..dec0d15 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.hpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.hpp
@@ -22,13 +22,25 @@
     };
 };
 
-class TestMultiplicationLayerVisitor : public TestLayerVisitor
+class TestDivisionLayerVisitor : public TestLayerVisitor
 {
 public:
-    explicit TestMultiplicationLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
+    explicit TestDivisionLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
 
-    void VisitMultiplicationLayer(const IConnectableLayer* layer,
-                                  const char* name = nullptr) override {
+    void VisitDivisionLayer(const IConnectableLayer* layer,
+                            const char* name = nullptr) override {
+        CheckLayerPointer(layer);
+        CheckLayerName(name);
+    };
+};
+
+class TestEqualLayerVisitor : public TestLayerVisitor
+{
+public:
+    explicit TestEqualLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
+
+    void VisitEqualLayer(const IConnectableLayer* layer,
+                         const char* name = nullptr) override {
         CheckLayerPointer(layer);
         CheckLayerName(name);
     };
@@ -46,25 +58,37 @@
     };
 };
 
-class TestDivisionLayerVisitor : public TestLayerVisitor
+class TestGatherLayerVisitor : public TestLayerVisitor
 {
 public:
-    explicit TestDivisionLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
+    explicit TestGatherLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
 
-    void VisitDivisionLayer(const IConnectableLayer* layer,
-                            const char* name = nullptr) override {
+    void VisitGatherLayer(const IConnectableLayer* layer,
+                          const char* name = nullptr) override {
         CheckLayerPointer(layer);
         CheckLayerName(name);
     };
 };
 
-class TestSubtractionLayerVisitor : public TestLayerVisitor
+class TestGreaterLayerVisitor : public TestLayerVisitor
 {
 public:
-    explicit TestSubtractionLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
+    explicit TestGreaterLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
 
-    void VisitSubtractionLayer(const IConnectableLayer* layer,
-                               const char* name = nullptr) override {
+    void VisitGreaterLayer(const IConnectableLayer* layer,
+                           const char* name = nullptr) override {
+        CheckLayerPointer(layer);
+        CheckLayerName(name);
+    };
+};
+
+class TestMultiplicationLayerVisitor : public TestLayerVisitor
+{
+public:
+    explicit TestMultiplicationLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
+
+    void VisitMultiplicationLayer(const IConnectableLayer* layer,
+                                  const char* name = nullptr) override {
         CheckLayerPointer(layer);
         CheckLayerName(name);
     };
@@ -94,30 +118,6 @@
     };
 };
 
-class TestGreaterLayerVisitor : public TestLayerVisitor
-{
-public:
-    explicit TestGreaterLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
-
-    void VisitGreaterLayer(const IConnectableLayer* layer,
-                           const char* name = nullptr) override {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-    };
-};
-
-class TestEqualLayerVisitor : public TestLayerVisitor
-{
-public:
-    explicit TestEqualLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
-
-    void VisitEqualLayer(const IConnectableLayer* layer,
-                         const char* name = nullptr) override {
-        CheckLayerPointer(layer);
-        CheckLayerName(name);
-    };
-};
-
 class TestRsqrtLayerVisitor : public TestLayerVisitor
 {
 public:
@@ -130,16 +130,30 @@
     };
 };
 
-class TestGatherLayerVisitor : public TestLayerVisitor
+class TestSliceLayerVisitor : public TestLayerVisitor
 {
 public:
-    explicit TestGatherLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
+    explicit TestSliceLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
 
-    void VisitGatherLayer(const IConnectableLayer* layer,
-                          const char* name = nullptr) override {
+    void VisitSliceLayer(const IConnectableLayer* layer,
+                         const SliceDescriptor& sliceDescriptor,
+                         const char* name = nullptr) override
+    {
         CheckLayerPointer(layer);
         CheckLayerName(name);
     };
 };
 
-} //namespace armnn
\ No newline at end of file
+class TestSubtractionLayerVisitor : public TestLayerVisitor
+{
+public:
+    explicit TestSubtractionLayerVisitor(const char* name = nullptr) : TestLayerVisitor(name) {};
+
+    void VisitSubtractionLayer(const IConnectableLayer* layer,
+                               const char* name = nullptr) override {
+        CheckLayerPointer(layer);
+        CheckLayerName(name);
+    };
+};
+
+} // namespace armnn
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 602c4ab..06bfb91 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -745,6 +745,13 @@
     CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer);
 }
 
+void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer,
+                                        const armnn::SliceDescriptor& sliceDescriptor,
+                                        const char* name)
+{
+    throw UnimplementedException("SerializerVisitor::VisitSliceLayer is not implemented");
+}
+
 // Build FlatBuffer for Softmax Layer
 void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
                                           const armnn::SoftmaxDescriptor& softmaxDescriptor,
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 190ed23..8e65902 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -193,6 +193,10 @@
     void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
                          const char* name = nullptr) override;
 
+    void VisitSliceLayer(const armnn::IConnectableLayer* layer,
+                         const armnn::SliceDescriptor& sliceDescriptor,
+                         const char* name = nullptr) override;
+
     void VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
                            const armnn::SoftmaxDescriptor& softmaxDescriptor,
                            const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index a8d1ead..7f1fd10 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -414,6 +414,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsSliceSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
+                                        const SliceDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 25dbdf2..8df1f8d 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -253,6 +253,11 @@
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSliceSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          const SliceDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index f290cbd..2fa0c92 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2443,7 +2443,7 @@
     ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
     ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
     ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
-    
+
     // Infer number of batches, input size and output size from tensor dimensions
     const uint32_t numBatches = inputInfo.GetShape()[0];
     const uint32_t inputSize  = inputInfo.GetShape()[1];
@@ -2584,4 +2584,59 @@
     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
+void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"SliceQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+
+    const unsigned int rank = inputTensorInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
+    }
+
+    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
+
+    // Check if m_Begin and m_Size have the expected length
+    if (m_Parameters.m_Begin.size() != rank)
+    {
+        throw InvalidArgumentException(descriptorName +
+            ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
+    }
+    if (m_Parameters.m_Size.size() != rank)
+    {
+        throw InvalidArgumentException(descriptorName +
+            ": Length of size descriptor must equal rank " + std::to_string(rank));
+    }
+
+    // Check if the shape of the output tensor matches m_Size
+    const TensorShape& outputShape = outputTensorInfo.GetShape();
+    for (unsigned int i = 0u; i < rank; ++i)
+    {
+        if (m_Parameters.m_Size[i] != outputShape[i])
+        {
+            throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
+        }
+    }
+
+    // Check if the sum of begin offset and size in a given dimension
+    // does not exceed the size of corresponding input
+    const TensorShape& inputShape  = inputTensorInfo.GetShape();
+    for (unsigned int i = 0u; i < rank; ++i)
+    {
+        if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
+        {
+            throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
+                std::to_string(i) + " exceeds input size.");
+        }
+    }
+}
+
 } // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 35130ad..1e49243 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -533,4 +533,9 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
-} //namespace armnn
+/// Queue descriptor for the Slice workload; carries a SliceDescriptor (begin/size per dimension).
+struct SliceQueueDescriptor : QueueDescriptorWithParameters<SliceDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
+} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 17bd98b..9d6b2bd 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -760,6 +760,19 @@
                                                           reason);
             break;
         }
+        case LayerType::Slice:
+        {
+            auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer);
+
+            // Input info comes from the output slot of the preceding, connected layer.
+            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
+                                                          OverrideDataType(output, dataType),
+                                                          cLayer->GetParameters(),
+                                                          reason);
+            break;
+        }
         case LayerType::Softmax:
         {
             auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
@@ -1245,6 +1258,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+// Default implementation returns a null workload; backends supporting Slice override this.
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
@@ -1300,4 +1319,4 @@
     return std::unique_ptr<IWorkload>();
 }
 
-} // namepsace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 6fd334b..91cf2c7 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -186,6 +186,9 @@
     virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                      const WorkloadInfo&           info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 1dc9e97..17b7934 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -473,6 +473,8 @@
 
 DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
 
+DECLARE_LAYER_POLICY_2_PARAM(Slice)
+
 DECLARE_LAYER_POLICY_2_PARAM(Softmax)
 
 DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)