IVGCVSW-3258 Add front end support for new SpaceToDepth layer

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Id677e29a734f2b36483d939ad370079bdc11551e
diff --git a/Android.mk b/Android.mk
index f791c1a..b5376ac 100644
--- a/Android.mk
+++ b/Android.mk
@@ -128,6 +128,7 @@
         src/armnn/layers/ResizeBilinearLayer.cpp \
         src/armnn/layers/RsqrtLayer.cpp \
         src/armnn/layers/SpaceToBatchNdLayer.cpp \
+        src/armnn/layers/SpaceToDepthLayer.cpp \
         src/armnn/layers/SoftmaxLayer.cpp \
         src/armnn/layers/SplitterLayer.cpp \
         src/armnn/layers/StridedSliceLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8a9b976..1b6abb6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -300,6 +300,8 @@
     src/armnn/layers/ReshapeLayer.cpp
     src/armnn/layers/SpaceToBatchNdLayer.hpp
     src/armnn/layers/SpaceToBatchNdLayer.cpp
+    src/armnn/layers/SpaceToDepthLayer.hpp
+    src/armnn/layers/SpaceToDepthLayer.cpp
     src/armnn/layers/ResizeBilinearLayer.hpp
     src/armnn/layers/ResizeBilinearLayer.cpp
     src/armnn/layers/RsqrtLayer.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 25bf818..9479db3 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -540,6 +540,20 @@
     DataLayout m_DataLayout;
 };
 
+/// A SpaceToDepthDescriptor for the SpaceToDepthLayer.
+struct SpaceToDepthDescriptor
+{
+    SpaceToDepthDescriptor()
+    : m_BlockSize(1u)
+    , m_DataLayout(DataLayout::NHWC)
+    {}
+
+    /// Scalar specifying the input block size. It must be >= 1
+    unsigned int m_BlockSize;
+    /// The data layout to be used (NCHW, NHWC).
+    DataLayout m_DataLayout;
+};
+
 /// An LstmDescriptor for the LstmLayer.
 struct LstmDescriptor
 {
@@ -667,4 +681,4 @@
     unsigned int m_NumOutputSlots;
 };
 
-}
+} // namespace armnn
\ No newline at end of file
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 4f47738..1c75c25 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -28,6 +28,7 @@
 struct ResizeBilinearDescriptor;
 struct SoftmaxDescriptor;
 struct SpaceToBatchNdDescriptor;
+struct SpaceToDepthDescriptor;
 struct StridedSliceDescriptor;
 struct ViewsDescriptor;
 
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index f41495c..4c113d3 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -260,6 +260,11 @@
                                            const SpaceToBatchNdDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsSpaceToDepthSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const SpaceToDepthDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
     virtual bool IsSplitterSupported(const TensorInfo& input,
                                      const ViewsDescriptor& descriptor,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index cbddb2d..ab83dbf 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -336,6 +336,14 @@
                                           const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                           const char* name = nullptr) = 0;
 
+    /// Function that a space to depth layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
+    /// @param name - Optional name for the layer.
+    virtual void VisitSpaceToDepthLayer(const IConnectableLayer* layer,
+                                        const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                        const char* name = nullptr) = 0;
+
     /// Function that a splitter layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index f3dfcd8..e5ebbc4 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -328,6 +328,13 @@
     virtual IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                       const char* name = nullptr) = 0;
 
+    /// Adds a space to depth layer to the network.
+    /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                                    const char* name = nullptr) = 0;
+
     /// Adds a floor layer to the network.
     /// @param name - Optional name for the layer.
     /// @return - Interface for configuring the layer.
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 0ae8705..d58aa87 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -326,6 +326,14 @@
                                char* reasonIfUnsupported = nullptr,
                                size_t reasonIfUnsupportedMaxLength = 1024);
 
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsSpaceToDepthSupported(const BackendId& backend,
+                             const TensorInfo& input,
+                             const TensorInfo& output,
+                             const SpaceToDepthDescriptor& descriptor,
+                             char* reasonIfUnsupported = nullptr,
+                             size_t reasonIfUnsupportedMaxLength = 1024);
+
 ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
 bool IsSplitterSupported(const BackendId& backend,
                          const TensorInfo& input,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 47a8384..f1abab5 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -173,6 +173,10 @@
                                   const SpaceToBatchNdDescriptor&,
                                   const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitSpaceToDepthLayer(const IConnectableLayer*,
+                                const SpaceToDepthDescriptor&,
+                                const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitSplitterLayer(const IConnectableLayer*,
                             const ViewsDescriptor&,
                             const char*) override { DefaultPolicy::Apply(__func__); }
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 9a215e6..377fb92 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -55,6 +55,7 @@
     Rsqrt,
     Softmax,
     SpaceToBatchNd,
+    SpaceToDepth,
     Splitter,
     StridedSlice,
     Subtraction,
@@ -66,6 +67,6 @@
 const char* GetLayerTypeAsCString(LayerType type);
 
 using Coordinates = std::array<unsigned int, MaxNumOfTensorDimensions>;
-using Dimensions = std::array<unsigned int, MaxNumOfTensorDimensions>;
+using Dimensions  = std::array<unsigned int, MaxNumOfTensorDimensions>;
 
 }
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 40330f2..de9717c 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -47,6 +47,7 @@
 #include "layers/RsqrtLayer.hpp"
 #include "layers/SoftmaxLayer.hpp"
 #include "layers/SpaceToBatchNdLayer.hpp"
+#include "layers/SpaceToDepthLayer.hpp"
 #include "layers/SplitterLayer.hpp"
 #include "layers/StridedSliceLayer.hpp"
 #include "layers/SubtractionLayer.hpp"
@@ -120,6 +121,7 @@
 DECLARE_LAYER(Rsqrt)
 DECLARE_LAYER(Softmax)
 DECLARE_LAYER(SpaceToBatchNd)
+DECLARE_LAYER(SpaceToDepth)
 DECLARE_LAYER(Splitter)
 DECLARE_LAYER(StridedSlice)
 DECLARE_LAYER(Subtraction)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 6bd365b..3e7d4d5 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -835,6 +835,12 @@
     return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
 }
 
+IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                                 const char* name)
+{
+    return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
+}
+
 IConnectableLayer* Network::AddFloorLayer(const char* name)
 {
     return m_Graph->AddLayer<FloorLayer>(name);
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 52a2714..2648c3f 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -147,6 +147,9 @@
     IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                               const char* name = nullptr) override;
 
+    IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                            const char* name = nullptr) override;
+
     IConnectableLayer* AddFloorLayer(const char* name = nullptr) override;
 
     IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) override;
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
new file mode 100644
index 0000000..b24490f
--- /dev/null
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -0,0 +1,85 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpaceToDepthLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+#include <numeric>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+SpaceToDepthLayer::SpaceToDepthLayer(const SpaceToDepthDescriptor param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::SpaceToDepth, param, name)
+{}
+
+std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const Graph& graph,
+                                                             const IWorkloadFactory& factory) const
+{
+    SpaceToDepthQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_BlockSize  = m_Param.m_BlockSize;
+    descriptor.m_Parameters.m_DataLayout = m_Param.m_DataLayout;
+
+    return factory.CreateSpaceToDepth(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
+{
+    return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 1);
+
+    TensorShape inputShape = inputShapes[0];
+    TensorShape outputShape(inputShape);
+
+    outputShape[0] = inputShape[0];
+
+    DataLayoutIndexed dimensionIndices{m_Param.m_DataLayout};
+    unsigned int hIndex = dimensionIndices.GetHeightIndex();
+    unsigned int wIndex = dimensionIndices.GetWidthIndex();
+    unsigned int cIndex = dimensionIndices.GetChannelsIndex();
+
+    outputShape[hIndex] = inputShape[hIndex] / m_Param.m_BlockSize;
+    outputShape[wIndex] = inputShape[wIndex] / m_Param.m_BlockSize;
+
+    outputShape[cIndex] = inputShape[cIndex] * m_Param.m_BlockSize * m_Param.m_BlockSize;
+
+    return std::vector<TensorShape>({ outputShape });
+}
+
+void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    std::vector<TensorShape> inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "SpaceToDepthLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitSpaceToDepthLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
new file mode 100644
index 0000000..b83a9e0
--- /dev/null
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -0,0 +1,50 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a SpaceToDepth operation.
+class SpaceToDepthLayer : public LayerWithParameters<SpaceToDepthDescriptor>
+{
+public:
+    /// Makes a workload for the SpaceToDepth type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    SpaceToDepthLayer* Clone(Graph& graph) const override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref SpaceToDepthLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a SpaceToDepthLayer.
+    /// @param [in] param SpaceToDepthDescriptor to configure the SpaceToDepthLayer operation.
+    /// @param [in] name Optional name for the layer.
+    SpaceToDepthLayer(const SpaceToDepthDescriptor param, const char* name);
+
+    /// Default destructor
+    ~SpaceToDepthLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/LayerValidateOutputTest.cpp b/src/armnn/test/LayerValidateOutputTest.cpp
index 999844e..acefd51 100644
--- a/src/armnn/test/LayerValidateOutputTest.cpp
+++ b/src/armnn/test/LayerValidateOutputTest.cpp
@@ -4,11 +4,12 @@
 //
 #include <armnn/ArmNN.hpp>
 
+#include <Graph.hpp>
+#include <layers/BatchToSpaceNdLayer.hpp>
+#include <layers/SpaceToDepthLayer.hpp>
+
 #include <boost/algorithm/string.hpp>
 #include <boost/test/unit_test.hpp>
-#include <layers/BatchToSpaceNdLayer.hpp>
-#include <Graph.hpp>
-
 
 BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
 
@@ -35,4 +36,26 @@
     BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
 }
 
+BOOST_AUTO_TEST_CASE(TestSpaceToDepthInferOutputShape)
+{
+    armnn::Graph graph;
+
+    armnn::SpaceToDepthDescriptor descriptor;
+    descriptor.m_BlockSize  = 2;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::SpaceToDepthLayer* const spaceToDepthLayer =
+        graph.AddLayer<armnn::SpaceToDepthLayer>(descriptor, "spaceToDepth");
+
+    std::vector<armnn::TensorShape> shapes;
+    const std::vector<unsigned int> dimSizes{ 1, 16, 8, 3 };
+    armnn::TensorShape shape(4, dimSizes.data());
+    shapes.push_back(shape);
+
+    const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
+    armnn::TensorShape expectedShape(4, expectedDimSizes.data());
+
+    BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index ccf2199..676d10c 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -783,6 +783,14 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
 }
 
+// Build FlatBuffer for SpaceToDepthLayer
+void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
+                                               const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                               const char* name)
+{
+    throw armnn::Exception("SerializerVisitor::VisitSpaceToDepthLayer is not yet implemented");
+}
+
 // Build FlatBuffer for Splitter Layer
 void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer,
                                            const armnn::ViewsDescriptor& viewsDescriptor,
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 2e2816a..c9416bb 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -182,6 +182,10 @@
                                   const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                   const char* name = nullptr) override;
 
+    void VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
+                                const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                const char* name = nullptr) override;
+
     void VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
                                  const armnn::NormalizationDescriptor& normalizationDescriptor,
                                  const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 71b1745..48705c8 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -392,6 +392,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const SpaceToDepthDescriptor& descriptor,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
                                            const ViewsDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 7552758..4921cf9 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -247,6 +247,11 @@
                                    const SpaceToBatchNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSpaceToDepthSupported(const TensorInfo& input,
+                                 const TensorInfo& output,
+                                 const SpaceToDepthDescriptor& descriptor,
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
     bool IsSplitterSupported(const TensorInfo& input,
                              const ViewsDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 3e33b94..501fdd8 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -309,6 +309,11 @@
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct SpaceToDepthQueueDescriptor : QueueDescriptorWithParameters<SpaceToDepthDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct FloorQueueDescriptor : QueueDescriptor
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index f026e1e..678d330 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -680,6 +680,19 @@
                                                                    reason);
             break;
         }
+        case LayerType::SpaceToDepth:
+        {
+            auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
+
+            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
+                                                                 OverrideDataType(output, dataType),
+                                                                 cLayer->GetParameters(),
+                                                                 reason);
+            break;
+        }
         case LayerType::Splitter:
         {
             auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
@@ -1044,6 +1057,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
+                                                                const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& Info) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 11c36eb..cc99356 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -173,6 +173,9 @@
     virtual std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 7161464..fa6ec10 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -396,6 +396,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
 
+DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)
+
 DECLARE_LAYER_POLICY_2_PARAM(Splitter)
 
 DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)