COMPMID-1761: NEON: Implement Pack

Change-Id: Icc3392494b1e3361e8fd925da200827c494351b3
Reviewed-on: https://review.mlplatform.org/430
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
diff --git a/arm_compute/core/CL/kernels/CLStackLayerKernel.h b/arm_compute/core/CL/kernels/CLStackLayerKernel.h
index 4d377da..1511a4e 100644
--- a/arm_compute/core/CL/kernels/CLStackLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLStackLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -50,6 +50,8 @@
     ~CLStackLayerKernel() = default;
     /** Initialise the kernel's inputs and output
      *
+     * @note Supported input tensor rank: up to 4
+     *
      * @param[in]  input       Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in]  axis        The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
      * @param[in]  idx_input   Index of the input tensor in the list of tensors to stack.
@@ -59,7 +61,9 @@
      *
      */
     void configure(const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output);
-    /**  Static function to check if given info will lead to a valid configuration of @ref CLStackLayerKernel
+    /** Static function to check if given info will lead to a valid configuration of @ref CLStackLayerKernel
+     *
+     * @note Supported input tensor rank: up to 4
      *
      * @param[in] input       Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in] axis        The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 26d2aca..a32c507 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -117,6 +117,7 @@
 #include "arm_compute/core/NEON/kernels/NESobel5x5Kernel.h"
 #include "arm_compute/core/NEON/kernels/NESobel7x7Kernel.h"
 #include "arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NEStackLayerKernel.h"
 #include "arm_compute/core/NEON/kernels/NEStridedSliceKernel.h"
 #include "arm_compute/core/NEON/kernels/NETableLookupKernel.h"
 #include "arm_compute/core/NEON/kernels/NEThresholdKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEStackLayerKernel.h b/arm_compute/core/NEON/kernels/NEStackLayerKernel.h
new file mode 100644
index 0000000..3a9e81f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEStackLayerKernel.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __ARM_COMPUTE_NESTACKLAYERKERNEL_H__
+#define __ARM_COMPUTE_NESTACKLAYERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NEON kernel to stack a rank-R tensor into one with rank-(R+1) along the axis dimension. */
+class NEStackLayerKernel : public INEKernel
+{
+public:
+    const char *name() const override
+    {
+        return "NEStackLayerKernel";
+    }
+    /** Default constructor */
+    NEStackLayerKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEStackLayerKernel(const NEStackLayerKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEStackLayerKernel &operator=(const NEStackLayerKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    NEStackLayerKernel(NEStackLayerKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    NEStackLayerKernel &operator=(NEStackLayerKernel &&) = default;
+    /** Default destructor */
+    ~NEStackLayerKernel() = default;
+    /** Initialise the kernel's inputs and output
+     *
+     * @note Supported input tensor rank: up to 4
+     *
+     * @param[in]  input       Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[in]  axis        The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+     * @param[in]  idx_input   Index of the input tensor in the list of tensors to stack.
+     *                         All tensors in the list must have the same shape
+     * @param[in]  num_tensors Number of tensors to stack
+     * @param[out] output      Output tensor. Data types supported: Same as @p input.
+     *
+     */
+    void configure(const ITensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEStackLayerKernel
+     *
+     * @note Supported input tensor rank: up to 4
+     *
+     * @param[in] input       Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[in] axis        The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+     * @param[in] idx_input   Index of the input tensor in the list of tensors to stack
+     *                        All tensors in the list must have the same shape
+     * @param[in] num_tensors Number of tensors to stack
+     * @param[in] output      Output tensor info. Data types supported: Same as @p input.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output);
+
+    // Inherited methods overridden
+    void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+    /** Template function to run the stack
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <typename T>
+    void run_stack(const Window &window);
+
+    /** Common signature for all the specialised stack functions
+     *
+     * @param[in] window Region on which to execute the kernel.
+     */
+    using StackFunctionPtr = void (NEStackLayerKernel::*)(const Window &window);
+
+    const ITensor   *_input;
+    ITensor         *_output;
+    unsigned int     _axis;
+    unsigned int     _idx_input;
+    StackFunctionPtr _func;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NESTACKLAYERKERNEL_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLStackLayer.h b/arm_compute/runtime/CL/functions/CLStackLayer.h
index 9794014..5b821b8 100644
--- a/arm_compute/runtime/CL/functions/CLStackLayer.h
+++ b/arm_compute/runtime/CL/functions/CLStackLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -48,13 +48,17 @@
     CLStackLayer();
     /** Initialise the kernel's inputs vector and output.
      *
+     * @note Supported input tensor rank: up to 4
+     *
      * @param[in]  input  The vectors containing all the tensors with the same shape to stack. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in]  axis   The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
      *                    Negative values wrap around
      * @param[out] output Output tensor. Data types supported: Same as @p input.
      */
     void configure(const std::vector<ICLTensor *> &input, int axis, ICLTensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConcatenateLayer
+    /** Static function to check if given info will lead to a valid configuration of @ref CLStackLayerKernel
+     *
+     * @note Supported input tensor rank: up to 4
      *
      * @param[in] input  The vectors containing all the tensors info with the same shape to stack. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in] axis   The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
@@ -73,5 +77,5 @@
     std::unique_ptr<CLStackLayerKernel[]> _stack_kernels;
     unsigned int                          _num_inputs;
 };
-}
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_CLSTACKLAYER_H__ */
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 2daef70..da61853 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -123,6 +123,7 @@
 #include "arm_compute/runtime/NEON/functions/NESobel7x7.h"
 #include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
 #include "arm_compute/runtime/NEON/functions/NESplit.h"
+#include "arm_compute/runtime/NEON/functions/NEStackLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEStridedSlice.h"
 #include "arm_compute/runtime/NEON/functions/NETableLookup.h"
 #include "arm_compute/runtime/NEON/functions/NEThreshold.h"
diff --git a/arm_compute/runtime/NEON/functions/NEStackLayer.h b/arm_compute/runtime/NEON/functions/NEStackLayer.h
new file mode 100644
index 0000000..6032dae
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEStackLayer.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESTACKLAYER_H__
+#define __ARM_COMPUTE_NESTACKLAYER_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/NEON/kernels/NEStackLayerKernel.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to stack tensors along an axis. This function calls the following kernel:
+ *
+ * -# @ref NEStackLayerKernel
+ *
+ */
+class NEStackLayer : public IFunction
+{
+public:
+    /** Default constructor */
+    NEStackLayer();
+    /** Initialise the kernel's inputs vector and output.
+     *
+     * @note Supported input tensor rank: up to 4
+     *
+     * @param[in]  input  The vectors containing all the tensors with the same shape to stack. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[in]  axis   The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+     *                    Negative values wrap around
+     * @param[out] output Output tensor. Data types supported: Same as @p input.
+     */
+    void configure(const std::vector<ITensor *> &input, int axis, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEStackLayerKernel
+     *
+     * @note Supported input tensor rank: up to 4
+     *
+     * @param[in] input  The vectors containing all the tensors info with the same shape to stack. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[in] axis   The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+     *                   Negative values wrap around
+     * @param[in] output Output tensor info. Data types supported: Same as @p input.
+     *
+     * @return a status
+     */
+    static Status validate(const std::vector<ITensorInfo *> &input, int axis, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    std::vector<ITensor *>                _input;
+    std::unique_ptr<NEStackLayerKernel[]> _stack_kernels;
+    unsigned int                          _num_inputs;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NESTACKLAYER_H__ */
diff --git a/src/core/CL/kernels/CLStackLayerKernel.cpp b/src/core/CL/kernels/CLStackLayerKernel.cpp
index bac8992..ac179ba 100644
--- a/src/core/CL/kernels/CLStackLayerKernel.cpp
+++ b/src/core/CL/kernels/CLStackLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -69,19 +69,9 @@
     auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_stack_shape(*input, axis, num_tensors)));
 
     // Configure kernel window
-    constexpr unsigned int num_elems_processed_per_iteration = 1;
+    Window win = calculate_max_window(*input);
 
-    // The window needs to be based on input as we copy all the depths of input
-    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
-
-    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
-    bool window_changed = update_window_and_padding(win, input_access, output_access);
-    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
+    return std::make_pair(Status{}, win);
 }
 } // namespace
 
diff --git a/src/core/NEON/kernels/NEStackLayerKernel.cpp b/src/core/NEON/kernels/NEStackLayerKernel.cpp
new file mode 100644
index 0000000..cc60609
--- /dev/null
+++ b/src/core/NEON/kernels/NEStackLayerKernel.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEStackLayerKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;
+
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::U8, DataType::S8,
+                                                         DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+                                                         DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON(idx_input >= num_tensors);
+    ARM_COMPUTE_RETURN_ERROR_ON(axis > input->num_dimensions());
+    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+
+    if(output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_stack_shape(*input, axis, num_tensors));
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    }
+
+    return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, unsigned int axis, unsigned int num_tensors, ITensorInfo *output)
+{
+    // Output auto inizialitation if not yet initialized
+    auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_stack_shape(*input, axis, num_tensors)));
+
+    // Configure kernel window
+    Window win = calculate_max_window(*input);
+
+    return std::make_pair(Status{}, win);
+}
+
+inline Coordinates shift_from_axis_and_replace_coordinate(const Coordinates &id, unsigned int axis, unsigned int idx_input)
+{
+    constexpr int max_out_coord = 5; // Input shape is max a 4D shape, output is max 5D
+    Coordinates   id_out        = id;
+    for(unsigned int i = max_out_coord - 1; i > axis; --i)
+    {
+        id_out.set(i, id[i - 1]);
+    }
+    id_out.set(axis, idx_input);
+    return id_out;
+}
+} // namespace
+
+NEStackLayerKernel::NEStackLayerKernel()
+    : _input(nullptr), _output(nullptr), _axis(), _idx_input(), _func(nullptr)
+{
+}
+
+void NEStackLayerKernel::configure(const ITensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), axis, idx_input, num_tensors, output->info()));
+
+    _input     = input;
+    _output    = output;
+    _axis      = axis;
+    _idx_input = idx_input;
+
+    switch(input->info()->element_size())
+    {
+        case 1:
+            _func = &NEStackLayerKernel::run_stack<uint8_t>;
+            break;
+        case 2:
+            _func = &NEStackLayerKernel::run_stack<uint16_t>;
+            break;
+        case 4:
+            _func = &NEStackLayerKernel::run_stack<uint32_t>;
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Element size not supported");
+            break;
+    }
+
+    // Configure kernel window
+    auto win_config = validate_and_configure_window(input->info(), axis, num_tensors, output->info());
+
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+    INEKernel::configure(win_config.second);
+}
+
+Status NEStackLayerKernel::validate(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, axis, idx_input, num_tensors, output));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), axis, num_tensors, output->clone().get()).first);
+    return Status{};
+}
+
+void NEStackLayerKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+    if(_func != nullptr)
+    {
+        (this->*_func)(window);
+    }
+}
+
+template <typename T>
+void NEStackLayerKernel::run_stack(const Window &window)
+{
+    Window window_out;
+    window_out.use_tensor_dimensions(_output->info()->tensor_shape());
+
+    Iterator input(_input, window);
+    Iterator output(_output, window_out);
+
+    const int stride_x = _output->info()->strides_in_bytes()[0];
+    const int stride_y = _output->info()->num_dimensions() >= 1 ? _output->info()->strides_in_bytes()[1] : 0;
+    const int stride_z = _output->info()->num_dimensions() >= 2 ? _output->info()->strides_in_bytes()[2] : 0;
+    const int stride_w = _output->info()->num_dimensions() >= 3 ? _output->info()->strides_in_bytes()[3] : 0;
+    const int stride_k = _output->info()->num_dimensions() >= 4 ? _output->info()->strides_in_bytes()[4] : 0;
+
+    execute_window_loop(window, [&](const Coordinates & id)
+    {
+        Coordinates id_out                           = shift_from_axis_and_replace_coordinate(id, _axis, _idx_input);
+        const int   idx                              = id_out[0] * stride_x + id_out[1] * stride_y + id_out[2] * stride_z + id_out[3] * stride_w + id_out[4] * stride_k;
+        *(reinterpret_cast<T *>(output.ptr() + idx)) = *(reinterpret_cast<const T *>(input.ptr()));
+    },
+    input);
+}
diff --git a/src/runtime/CL/functions/CLStackLayer.cpp b/src/runtime/CL/functions/CLStackLayer.cpp
index 85adcad..71327fe 100644
--- a/src/runtime/CL/functions/CLStackLayer.cpp
+++ b/src/runtime/CL/functions/CLStackLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -61,15 +61,19 @@
 Status CLStackLayer::validate(const std::vector<ITensorInfo *> &input, int axis, const ITensorInfo *output)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
-    ARM_COMPUTE_RETURN_ERROR_ON(input.size() < 2);
+    ARM_COMPUTE_RETURN_ERROR_ON(input.empty());
 
     // Wrap around negative values
-    const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->num_dimensions() + 1));
+    const size_t       rank   = input[0]->num_dimensions();
+    const unsigned int axis_u = wrap_around(axis, static_cast<int>(rank + 1));
 
     const unsigned int num_inputs = input.size();
 
     for(unsigned int i = 0; i < num_inputs; i++)
     {
+        // All the tensors must have the same rank
+        ARM_COMPUTE_RETURN_ERROR_ON(input[i]->num_dimensions() != rank);
+        // Validate Kernel
         ARM_COMPUTE_RETURN_ON_ERROR(CLStackLayerKernel::validate(input[i], axis_u, i, num_inputs, output));
     }
 
diff --git a/src/runtime/NEON/functions/NEStackLayer.cpp b/src/runtime/NEON/functions/NEStackLayer.cpp
new file mode 100644
index 0000000..2f49c22
--- /dev/null
+++ b/src/runtime/NEON/functions/NEStackLayer.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEStackLayer.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+#include "support/ToolchainSupport.h"
+namespace arm_compute
+{
+NEStackLayer::NEStackLayer() // NOLINT
+    : _input(),
+      _stack_kernels(),
+      _num_inputs(0)
+{
+}
+
+void NEStackLayer::configure(const std::vector<ITensor *> &input, int axis, ITensor *output)
+{
+    _num_inputs    = input.size();
+    _stack_kernels = arm_compute::support::cpp14::make_unique<NEStackLayerKernel[]>(_num_inputs);
+
+    // Wrap around negative values
+    const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->info()->num_dimensions() + 1));
+
+    for(unsigned int i = 0; i < _num_inputs; i++)
+    {
+        _stack_kernels[i].configure(input[i], axis_u, i, _num_inputs, output);
+    }
+}
+
+Status NEStackLayer::validate(const std::vector<ITensorInfo *> &input, int axis, const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+    ARM_COMPUTE_RETURN_ERROR_ON(input.empty());
+
+    // Wrap around negative values
+    const size_t       rank   = input[0]->num_dimensions();
+    const unsigned int axis_u = wrap_around(axis, static_cast<int>(rank + 1));
+
+    const unsigned int num_inputs = input.size();
+
+    for(unsigned int i = 0; i < num_inputs; i++)
+    {
+        // All the tensors must have the same rank
+        ARM_COMPUTE_RETURN_ERROR_ON(input[i]->num_dimensions() != rank);
+        // Validate Kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(NEStackLayerKernel::validate(input[i], axis_u, i, num_inputs, output));
+    }
+
+    return Status{};
+}
+
+void NEStackLayer::run()
+{
+    for(unsigned i = 0; i < _num_inputs; i++)
+    {
+        NEScheduler::get().schedule(&_stack_kernels[i], Window::DimY);
+    }
+}
+} // namespace arm_compute
diff --git a/tests/validation/CL/StackLayer.cpp b/tests/validation/CL/StackLayer.cpp
index 0899112..fa2e4ac 100644
--- a/tests/validation/CL/StackLayer.cpp
+++ b/tests/validation/CL/StackLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -117,6 +117,37 @@
 
 TEST_SUITE(CL)
 TEST_SUITE(StackLayer)
+
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+                                                                      framework::dataset::make("InputInfo",
+{
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(9U, 8U), 1, DataType::U8) },
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(1U, 2U), 1, DataType::U8), TensorInfo(TensorShape(1U, 2U), 1, DataType::U8), TensorInfo(TensorShape(1U, 2U), 1, DataType::U8)},
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(2U, 3U), 1, DataType::S32) },
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(7U, 5U, 3U, 8U, 2U), 1, DataType::S32), TensorInfo(TensorShape(7U, 5U, 3U, 8U, 2U), 1, DataType::S32)},
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(9U, 8U), 1, DataType::S32) },
+}),
+framework::dataset::make("OutputInfo",
+{
+    TensorInfo(TensorShape(1U, 9U, 8U), 1, DataType::U8),   // Passes, stack 1 tensor on x axis
+    TensorInfo(TensorShape(1U, 3U, 2U), 1, DataType::U8),   // Passes, stack 3 tensors on y axis
+    TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::S32),  // fails axis <  (- input's rank)
+    TensorInfo(TensorShape(3U, 7U, 5U), 1, DataType::S32),  // fails, input dimensions > 4
+    TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::U8),   // fails mismatching data types
+})),
+framework::dataset::make("Axis", { -3, 1, -4, -3, 1 })),
+framework::dataset::make("Expected", { true, true, false, false, false })),
+input_info, output_info, axis, expected)
+{
+    std::vector<TensorInfo>    ti(input_info);
+    std::vector<ITensorInfo *> vec(input_info.size());
+    for(size_t j = 0; j < vec.size(); ++j)
+    {
+        vec[j] = &ti[j];
+    }
+    ARM_COMPUTE_EXPECT(bool(CLStackLayer::validate(vec, axis, &output_info)) == expected, framework::LogLevel::ERRORS);
+}
+
 TEST_SUITE(Shapes1D)
 
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_1d_small,
diff --git a/tests/validation/NEON/StackLayer.cpp b/tests/validation/NEON/StackLayer.cpp
new file mode 100644
index 0000000..c18b9c8
--- /dev/null
+++ b/tests/validation/NEON/StackLayer.cpp
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2018-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/NEON/functions/NEStackLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/StackLayerFixture.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// *INDENT-OFF*
+// clang-format off
+/** Data types */
+const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8, DataType::F16, DataType::F32 });
+
+/** Num tensors values to test */
+const auto n_values = framework::dataset::make("NumTensors", { 3, 4 });
+
+/** Shapes 1D to test */
+const auto shapes_1d_small = combine(datasets::Small1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Shapes 2D to test */
+const auto shapes_2d_small = combine(datasets::Small2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Shapes 3D to test */
+const auto shapes_3d_small = combine(datasets::Small3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Shapes 4D to test */
+const auto shapes_4d_small = combine(datasets::Small4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Shapes 1D to test */
+const auto shapes_1d_large = combine(datasets::Large1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Shapes 2D to test */
+const auto shapes_2d_large = combine(datasets::Large2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Shapes 3D to test */
+const auto shapes_3d_large = combine(datasets::Large3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Shapes 4D to test */
+const auto shapes_4d_large = combine(datasets::Large4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Configuration test */
+void validate_configuration(TensorShape shape_in, int axis, DataType data_type, int num_tensors)
+{
+    // Wrap around negative values
+    const unsigned int axis_u = wrap_around(axis, static_cast<int>(shape_in.num_dimensions() + 1));
+
+    const TensorShape shape_dst = compute_stack_shape(TensorInfo(shape_in, 1, data_type), axis_u, num_tensors);
+
+    std::vector<Tensor>   tensors(num_tensors);
+    std::vector<ITensor*> src(num_tensors);
+
+    // Create vector of input tensors
+    for(int i = 0; i < num_tensors; ++i)
+    {
+        tensors[i] = create_tensor<Tensor>(shape_in, data_type);
+        src[i]     = &(tensors[i]);
+        ARM_COMPUTE_EXPECT(src[i]->info()->is_resizable(), framework::LogLevel::ERRORS);
+    }
+
+    // Create tensors
+    Tensor dst = create_tensor<Tensor>(shape_dst, data_type);
+
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    NEStackLayer stack;
+    stack.configure(src, axis, &dst);
+}
+} // namespace
+
+/** Fixture to use */
+template<typename T>
+using NEStackLayerFixture = StackLayerValidationFixture<Tensor, ITensor, Accessor, NEStackLayer, T>;
+
+using namespace arm_compute::misc::shape_calculator;
+
+TEST_SUITE(NEON)
+TEST_SUITE(StackLayer)
+
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+                                                                      framework::dataset::make("InputInfo",
+{
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(9U, 8U), 1, DataType::U8) },
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(1U, 2U), 1, DataType::U8), TensorInfo(TensorShape(1U, 2U), 1, DataType::U8), TensorInfo(TensorShape(1U, 2U), 1, DataType::U8)},
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(2U, 3U), 1, DataType::S32) },
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(7U, 5U, 3U, 8U, 2U), 1, DataType::S32), TensorInfo(TensorShape(7U, 5U, 3U, 8U, 2U), 1, DataType::S32)},
+    std::vector<TensorInfo>{ TensorInfo(TensorShape(9U, 8U), 1, DataType::S32) },
+}),
+framework::dataset::make("OutputInfo",
+{
+    TensorInfo(TensorShape(1U, 9U, 8U), 1, DataType::U8),   // Passes, stack 1 tensor on x axis
+    TensorInfo(TensorShape(1U, 3U, 2U), 1, DataType::U8),   // Passes, stack 3 tensors on y axis
+    TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::S32),  // fails, axis < (- input's rank)
+    TensorInfo(TensorShape(3U, 7U, 5U), 1, DataType::S32),  // fails, input dimensions > 4
+    TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::U8),   // fails, mismatching data types
+})),
+framework::dataset::make("Axis", { -3, 1, -4, -3, 1 })),
+framework::dataset::make("Expected", { true, true, false, false, false })),
+input_info, output_info, axis, expected)
+{
+    std::vector<TensorInfo>    ti(input_info);
+    std::vector<ITensorInfo *> vec(input_info.size());
+    for(size_t j = 0; j < vec.size(); ++j)
+    {
+        vec[j] = &ti[j];
+    }
+    ARM_COMPUTE_EXPECT(bool(NEStackLayer::validate(vec, axis, &output_info)) == expected, framework::LogLevel::ERRORS);
+}
+
+TEST_SUITE(Shapes1D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_1d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<int>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_1d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_1d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<short>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_1d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_1d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<char>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_1d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_1d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes1D
+
+TEST_SUITE(Shapes2D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_2d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<int>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_2d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_2d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<short>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_2d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_2d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<char>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_2d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_2d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes2D
+
+TEST_SUITE(Shapes3D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_3d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<int>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_3d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_3d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<short>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_3d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_3d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<char>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_3d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_3d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes3D
+
+TEST_SUITE(Shapes4D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_4d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<int>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_4d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_4d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S32 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<short>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_4d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_4d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S16 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEStackLayerFixture<char>, framework::DatasetMode::ALL,
+                                                           combine(combine(shapes_4d_small,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                                                           combine(combine(shapes_4d_large,
+                                                                           framework::dataset::make("DataType", { DataType::S8 })),
+                                                                           n_values))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes4D
+TEST_SUITE_END() // StackLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/StackLayerFixture.h b/tests/validation/fixtures/StackLayerFixture.h
index cab4350..cf055b5 100644
--- a/tests/validation/fixtures/StackLayerFixture.h
+++ b/tests/validation/fixtures/StackLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -80,7 +80,7 @@
         }
 
         // Create tensors
-        CLTensor dst;
+        TensorType dst;
 
         // The output tensor will be auto-initialized within the function