IVGCVSW-3418 Add Arm NN front end support for the new Stack layer

 * Added new StackLayer class
 * Made necessary changes to Descriptors, ILayerSupport, ILayerVisitor, etc.
 * Added unit tests

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: Ieb97a928a342ffe1901c6058eb895711c358fd3d
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index b097265..bf095ac 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -58,6 +58,7 @@
     SpaceToBatchNd,
     SpaceToDepth,
     Splitter,
+    Stack,
     StridedSlice,
     Subtraction,
     Switch,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 0f9633a..b3f7adc 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -50,6 +50,7 @@
 #include "layers/SpaceToBatchNdLayer.hpp"
 #include "layers/SpaceToDepthLayer.hpp"
 #include "layers/SplitterLayer.hpp"
+#include "layers/StackLayer.hpp"
 #include "layers/StridedSliceLayer.hpp"
 #include "layers/SubtractionLayer.hpp"
 #include "layers/SwitchLayer.hpp"
@@ -126,6 +127,7 @@
 DECLARE_LAYER(SpaceToBatchNd)
 DECLARE_LAYER(SpaceToDepth)
 DECLARE_LAYER(Splitter)
+DECLARE_LAYER(Stack)
 DECLARE_LAYER(StridedSlice)
 DECLARE_LAYER(Subtraction)
 DECLARE_LAYER(Switch)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3b7a1cf..2949381 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1422,6 +1422,12 @@
     return layer;
 }
 
+IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
+                                          const char* name)
+{
+    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
+}
+
 void Network::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 7fc5b65..8a99deb 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -197,6 +197,9 @@
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name = nullptr) override;
 
+    IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
+                                     const char* name = nullptr) override;
+
     void Accept(ILayerVisitor& visitor) const override;
 
 private:
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
new file mode 100644
index 0000000..59bc8d5
--- /dev/null
+++ b/src/armnn/layers/StackLayer.cpp
@@ -0,0 +1,98 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "StackLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <queue>
+
+namespace armnn
+{
+
+StackLayer::StackLayer(const StackDescriptor& param, const char* name)
+    : LayerWithParameters(param.m_NumInputs, 1, LayerType::Stack, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+{
+    StackQueueDescriptor descriptor;
+    return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+StackLayer* StackLayer::Clone(Graph& graph) const
+{
+    return CloneBase<StackLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    const TensorShape& inputShape = m_Param.m_InputShape;
+    const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
+    const unsigned int axis = m_Param.m_Axis;
+
+    BOOST_ASSERT(axis <= inputNumDimensions);
+
+    std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
+    for (unsigned int i = 0; i < axis; ++i)
+    {
+        dimensionSizes[i] = inputShape[i];
+    }
+
+    dimensionSizes[axis] = m_Param.m_NumInputs;
+
+    for (unsigned int i = axis + 1; i < inputNumDimensions + 1; ++i)
+    {
+        dimensionSizes[i] = inputShape[i-1];
+    }
+
+    TensorShape targetShape = TensorShape(inputNumDimensions + 1, dimensionSizes.data());
+
+    return std::vector<TensorShape>({ targetShape });
+}
+
+void StackLayer::ValidateTensorShapesFromInputs()
+{
+    // Validates Stack layer.
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "StackLayer: Num Input Slots must match Num Inputs.",
+        m_Param.m_NumInputs,
+        GetNumInputSlots());
+
+    VerifyLayerConnections(m_Param.m_NumInputs, CHECK_LOCATION());
+
+    // Constructs and validates input shapes
+    std::vector<TensorShape> inputShapes;
+    for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
+    {
+        TensorShape inputShape = GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape();
+        if (inputShape != m_Param.m_InputShape)
+        {
+            throw LayerValidationException("StackLayer: TensorShape set on InputSlot[" +
+                                           std::to_string(i) +
+                                           "] does not match defined input shape");
+        }
+        inputShapes.push_back(inputShape);
+    }
+
+    auto inferredShapes = InferOutputShapes(inputShapes);
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void StackLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitStackLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
new file mode 100644
index 0000000..6c84597
--- /dev/null
+++ b/src/armnn/layers/StackLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a stack operation.
+class StackLayer : public LayerWithParameters<StackDescriptor>
+{
+public:
+    /// Makes a workload for the Stack type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    StackLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref StackLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a StackLayer.
+    /// @param [in] param StackDescriptor to configure the stack operation.
+    /// @param [in] name Optional name for the layer.
+    StackLayer(const StackDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~StackLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 6ce56e9..24ae8b2 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -25,4 +25,10 @@
 ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsMatch,   PreluValidateTensorShapesFromInputsMatchTest)
 ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsNoMatch, PreluValidateTensorShapesFromInputsNoMatchTest)
 
+// Stack
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsMatch,       StackInferOutputShapeFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsNoMatch,     StackInferOutputShapeFromInputsNoMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsMatch,   StackValidateTensorShapesFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsNoMatch, StackValidateTensorShapesFromInputsNoMatchTest)
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 6e5602a..47eabd3 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -13,6 +13,7 @@
 #include <layers/BatchToSpaceNdLayer.hpp>
 #include <layers/SpaceToDepthLayer.hpp>
 #include <layers/PreluLayer.hpp>
+#include <layers/StackLayer.hpp>
 
 #include <boost/algorithm/string.hpp>
 #include <boost/test/unit_test.hpp>
@@ -193,3 +194,156 @@
     // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
     BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
 }
+
+void StackInferOutputShapeImpl(const armnn::StackDescriptor&          descriptor,
+                               const std::vector<armnn::TensorShape>& inputShapes,
+                               std::vector<armnn::TensorShape>&       outputShapes)
+{
+    armnn::Graph graph;
+    armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+    outputShapes = stackLayer->InferOutputShapes(inputShapes);
+}
+
+void StackInferOutputShapeFromInputsMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 4, 2 }  // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 2 }, // Actual input shapes
+        { 4, 2 },
+        { 4, 2 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape
+    (
+        { 4, 3, 2 }
+    );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void StackInferOutputShapeFromInputsNoMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 4, 2 }  // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 2 }, // Actual input shapes
+        { 4, 5 }, // Incorrectly shaped input tensor
+        { 4, 2 }
+    };
+
+    // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape
+    (
+        { 4, 3, 2 }
+    );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void CreateStackLayerHelper(armnn::Graph& graph,
+                            const armnn::StackDescriptor& descriptor,
+                            const std::vector<armnn::TensorShape>& inputShapes,
+                            const armnn::TensorShape& outputShape)
+{
+    // Creates the Stack layer
+    armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+
+    // Creates extra layers
+    std::vector<armnn::Layer*> inputs;
+    for (unsigned int i=0; i<inputShapes.size(); ++i)
+    {
+        inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
+    }
+    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    // Connects up
+    std::vector<armnn::TensorInfo> inputTensorInfos;
+    for (unsigned int i=0; i<inputs.size(); ++i)
+    {
+        inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
+    }
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    for (unsigned int i=0; i<inputs.size(); ++i)
+    {
+        Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
+    }
+    Connect(stackLayer, output, outputTensorInfo, 0, 0);
+}
+
+void StackValidateTensorShapesFromInputsMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 0;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 2, 5 }  // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 2, 5 }, // Actual input shapes
+        { 2, 5 },
+        { 2, 5 }
+    };
+
+    // Creates the Stack layer
+    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+}
+
+void StackValidateTensorShapesFromInputsNoMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 0;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 2, 5 }  // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 2, 5 }, // Actual input shapes
+        { 2, 2 }, // Incorrectly shaped input tensor
+        { 2, 5 }
+    };
+
+    // Creates the Stack layer
+    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+}
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index c9719aa..0a9e335 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -925,6 +925,13 @@
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
 }
 
+void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
+                                        const armnn::StackDescriptor& stackDescriptor,
+                                        const char* name)
+{
+    throw UnimplementedException("SerializerVisitor::VisitStackLayer not yet implemented");
+}
+
 void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
                                                const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
                                                const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 0383d105..8404a7f 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -202,6 +202,10 @@
                             const armnn::ViewsDescriptor& viewsDescriptor,
                             const char* name = nullptr) override;
 
+    void VisitStackLayer(const armnn::IConnectableLayer* layer,
+                         const armnn::StackDescriptor& stackDescriptor,
+                         const char* name = nullptr) override;
+
     void VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
                                 const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
                                 const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index ea22fac..26b98a2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -415,6 +415,14 @@
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                                        const TensorInfo& output,
+                                        const StackDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 36b8e77..dad0798 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -257,6 +257,11 @@
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                          const TensorInfo& output,
+                          const StackDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 324c1de..8786023 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -582,6 +582,91 @@
 }
 
 //---------------------------------------------------------------
+void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateNumOutputs(workloadInfo, "StackQueueDescriptor", 1);
+
+    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Must have the defined number of input tensors.");
+    }
+
+    // All inputs must have the same shape, which is defined in parameters
+    const TensorShape& inputShape = m_Parameters.m_InputShape;
+    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+    {
+        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: All input tensor shapes "
+                                           "must match the defined shape.");
+        }
+    }
+
+    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
+    // since the output tensor has an additional dimension.
+    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Axis may not be greater "
+                                       "than the number of input dimensions.");
+    }
+
+    // Output shape must be as inferred from the input shape
+    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
+    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
+    {
+        if (outputShape[i] != inputShape[i])
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                           "match shape inferred from input tensor.");
+        }
+    }
+
+    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                       "match shape inferred from input tensor.");
+    }
+
+    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
+    {
+        if (outputShape[i] != inputShape[i-1])
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                           "match shape inferred from input tensor.");
+        }
+    }
+
+    // Check the supported data types
+    std::vector<DataType> supportedTypes =
+    {
+            DataType::Float32,
+            DataType::Float16,
+            DataType::Boolean,
+            DataType::Signed32,
+            DataType::QuantisedAsymm8,
+            DataType::QuantisedSymm16
+    };
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      supportedTypes,
+                      "StackQueueDescriptor");
+
+    for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+    {
+        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+                                     workloadInfo.m_InputTensorInfos[i],
+                                     "StackQueueDescriptor",
+                                     "InputTensor[0]",
+                                     "InputTensor[" + std::to_string(i) + "]");
+    }
+    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+                                 workloadInfo.m_OutputTensorInfos[0],
+                                 "StackQueueDescriptor",
+                                 "InputTensor[0]",
+                                 "OutputTensor[0]");
+}
+
+//---------------------------------------------------------------
 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index d241f7b..f3d5069 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -110,6 +110,12 @@
 // Deprecated. Use ConcatQueueDescriptor instead
 using MergerQueueDescriptor = ConcatQueueDescriptor;
 
+// Stack layer workload data.
+struct StackQueueDescriptor : QueueDescriptorWithParameters<StackDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 // Activation layer workload data.
 struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1c23e17..a24a325 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -729,6 +729,33 @@
                                                              reason);
             break;
         }
+        case LayerType::Stack:
+        {
+            auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
+
+            // Get vector of all inputs.
+            auto getTensorInfo = [&dataType](const InputSlot& slot)
+                {
+                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+                };
+            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
+            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+            std::vector<TensorInfo> inputs(beginI, endI);
+
+            auto getTensorInfoPtr = [](const TensorInfo& info)
+                {
+                    return &info;
+                };
+            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
+
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+
+            break;
+        }
         case LayerType::StridedSlice:
         {
             auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
@@ -1130,6 +1157,12 @@
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& Info) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e09640f..749a258 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -189,6 +189,9 @@
     virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                       const WorkloadInfo&            info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& Info) const;
+
     virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& Info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index b02ab7b..6aff759 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -408,6 +408,8 @@
 
 DECLARE_LAYER_POLICY_2_PARAM(Splitter)
 
+DECLARE_LAYER_POLICY_2_PARAM(Stack)
+
 DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
 
 DECLARE_LAYER_POLICY_1_PARAM(Subtraction)