COMPMID-1947: Implement NESpaceToBatch
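
Adds the NEON kernel (NESpaceToBatchLayerKernel) and runtime function
(NESpaceToBatchLayer) for SpaceToBatch, supporting both a dynamic path
(block shape and paddings supplied as S32 tensors) and a static path
(block shape and paddings given as scalar values). A minimal usage sketch
of the static path, with illustrative shapes and values only:

    Tensor input{}, output{};
    input.allocator()->init(TensorInfo(TensorShape(4U, 4U, 1U, 1U), 1, DataType::F32));

    // 2x2 block, no padding: 4x4x1x1 input -> 2x2x1x4 output (output info is auto-initialised)
    NESpaceToBatchLayer s2b{};
    s2b.configure(&input, 2 /* block_shape_x */, 2 /* block_shape_y */,
                  Size2D(0, 0) /* padding_left */, Size2D(0, 0) /* padding_right */, &output);

    input.allocator()->allocate();
    output.allocator()->allocate();
    // ... fill input ...
    s2b.run();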

Change-Id: I59b3c17874ba24559b7fddf74f7659a1b9177759
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/735
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index cc6af39..6509e39 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -120,6 +120,7 @@
 #include "arm_compute/core/NEON/kernels/NESobel5x5Kernel.h"
 #include "arm_compute/core/NEON/kernels/NESobel7x7Kernel.h"
 #include "arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h"
 #include "arm_compute/core/NEON/kernels/NEStackLayerKernel.h"
 #include "arm_compute/core/NEON/kernels/NEStridedSliceKernel.h"
 #include "arm_compute/core/NEON/kernels/NETableLookupKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h b/arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h
new file mode 100644
index 0000000..1a138f2
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESPACETOBATCHLAYERKERNEL_H__
+#define __ARM_COMPUTE_NESPACETOBATCHLAYERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Interface for the space to batch kernel */
+class NESpaceToBatchLayerKernel : public INEKernel
+{
+public:
+    const char *name() const override
+    {
+        return "NESpaceToBatchLayerKernel";
+    }
+    /** Default constructor */
+    NESpaceToBatchLayerKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NESpaceToBatchLayerKernel(const NESpaceToBatchLayerKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NESpaceToBatchLayerKernel &operator=(const NESpaceToBatchLayerKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    NESpaceToBatchLayerKernel(NESpaceToBatchLayerKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    NESpaceToBatchLayerKernel &operator=(NESpaceToBatchLayerKernel &&) = default;
+    /** Default destructor */
+    ~NESpaceToBatchLayerKernel() = default;
+    /** Initialise the kernel's inputs and output.
+     *
+     * @param[in]  input       Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in]  block_shape 1-D tensor with shape [M]. Data types supported: S32
+     * @param[in]  paddings    2-D tensor with shape [2, M]. Data types supported: S32
+     * @param[out] output      Tensor output. Data types supported: same as @p input
+     */
+    void configure(const ITensor *input, const ITensor *block_shape, const ITensor *paddings, ITensor *output);
+    /** Initialise the kernel's input and output. (Static block shape and paddings)
+     *
+     * @param[in]  input         Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in]  block_shape_x Block shape x value.
+     * @param[in]  block_shape_y Block shape y value.
+     * @param[in]  padding_left  The left padding of the output tensor.
+     * @param[in]  padding_right The right padding of the output tensor.
+     * @param[out] output        Tensor output. Data types supported: same as @p input
+     */
+    void configure(const ITensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToBatchLayerKernel
+     *
+     * @param[in] input       Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] block_shape 1-D tensor info with shape [M]. Data types supported: S32
+     * @param[in] paddings    2-D tensor info with shape [2, M]. Data types supported: S32
+     * @param[in] output      Tensor output info. Data types supported: same as @p input
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToBatchLayerKernel (Static block shape and paddings)
+     *
+     * @param[in] input         Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] block_shape_x Block shape x value.
+     * @param[in] block_shape_y Block shape y value.
+     * @param[in] padding_left  The left padding of the output tensor.
+     * @param[in] padding_right The right padding of the output tensor.
+     * @param[in] output        Tensor output info. Data types supported: same as @p input
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+    const ITensor *_input;       /**< Source tensor */
+    const ITensor *_block_shape; /**< Block shape tensor */
+    const ITensor *_paddings;    /**< Paddings tensor */
+    ITensor       *_output;      /**< Destination tensor */
+
+    Size2D _padding_left;  /**< Left padding values */
+    int    _block_shape_x; /**< Block shape x value */
+    int    _block_shape_y; /**< Block shape y value */
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NESPACETOBATCHLAYERKERNEL_H__ */
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index cc13ff5..a07e432 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -125,6 +125,7 @@
 #include "arm_compute/runtime/NEON/functions/NESobel5x5.h"
 #include "arm_compute/runtime/NEON/functions/NESobel7x7.h"
 #include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
+#include "arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h"
 #include "arm_compute/runtime/NEON/functions/NESplit.h"
 #include "arm_compute/runtime/NEON/functions/NEStackLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEStridedSlice.h"
diff --git a/arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h b/arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h
new file mode 100644
index 0000000..5b7d793
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESPACETOBATCHLAYER_H__
+#define __ARM_COMPUTE_NESPACETOBATCHLAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/NEON/kernels/NEMemsetKernel.h"
+#include "arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to spatially divide a tensor. This function calls the following NEON kernels/functions:
+ *
+ *  -# @ref NEMemsetKernel
+ *  -# @ref NESpaceToBatchLayerKernel
+ */
+class NESpaceToBatchLayer : public IFunction
+{
+public:
+    /** Default constructor */
+    NESpaceToBatchLayer();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NESpaceToBatchLayer(const NESpaceToBatchLayer &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NESpaceToBatchLayer &operator=(const NESpaceToBatchLayer &) = delete;
+    /** Allow instances of this class to be moved */
+    NESpaceToBatchLayer(NESpaceToBatchLayer &&) = default;
+    /** Allow instances of this class to be moved */
+    NESpaceToBatchLayer &operator=(NESpaceToBatchLayer &&) = default;
+    /** Default destructor */
+    virtual ~NESpaceToBatchLayer() = default;
+    /** Set the input and output tensors.
+     *
+     * @param[in]  input       Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in]  block_shape 1-D tensor with shape [M]. Data types supported: S32
+     * @param[in]  paddings    2-D tensor with shape [2, M]. Data types supported: S32
+     * @param[out] output      Tensor output. Data types supported: same as @p input
+     */
+    void configure(const ITensor *input, const ITensor *block_shape, const ITensor *paddings, ITensor *output);
+    /** Set the input and output tensors. (Static block shape and paddings)
+     *
+     * @param[in]  input         Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in]  block_shape_x Block shape x value.
+     * @param[in]  block_shape_y Block shape y value.
+     * @param[in]  padding_left  The left padding of the output tensor.
+     * @param[in]  padding_right The right padding of the output tensor.
+     * @param[out] output        Tensor output. Data types supported: same as @p input
+     */
+    void configure(const ITensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToBatchLayer
+     *
+     * @param[in] input       Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] block_shape block shape tensor info with shape [M]. Data types supported: S32
+     * @param[in] paddings    paddings tensor info with shape [2, M]. Data types supported: S32
+     * @param[in] output      Tensor output info. Data types supported: same as @p input
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToBatchLayer (Static block shape and paddings)
+     *
+     * @param[in] input         Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] block_shape_x Block shape x value.
+     * @param[in] block_shape_y Block shape y value.
+     * @param[in] padding_left  The left padding of the output tensor.
+     * @param[in] padding_right The right padding of the output tensor.
+     * @param[in] output        Tensor output info. Data types supported: same as @p input
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    NESpaceToBatchLayerKernel _space_to_batch_kernel; /**< SpaceToBatch kernel to run */
+    NEMemsetKernel            _memset_kernel;         /**< Memset kernel to run */
+    bool                      _has_padding;           /**< Flag to check if the output has padding */
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NESPACETOBATCHLAYER_H__ */
diff --git a/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp b/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp
new file mode 100644
index 0000000..2e46b14
--- /dev/null
+++ b/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include <arm_neon.h>
+#include <cstdint>
+#include <cstring>
+
+using namespace arm_compute::misc::shape_calculator;
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_info, const ITensorInfo *paddings, const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, block_info, paddings, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(block_info, 1, DataType::S32);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+    ARM_COMPUTE_RETURN_ERROR_ON(block_info->num_dimensions() > 1);
+    ARM_COMPUTE_RETURN_ERROR_ON(paddings->num_dimensions() > 2);
+    ARM_COMPUTE_RETURN_ERROR_ON(paddings->tensor_shape()[1] != block_info->tensor_shape()[0]);
+
+    // Validate output if initialized
+    if(output->total_size() != 0)
+    {
+        const DataLayout data_layout = input->data_layout();
+        const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+        ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    }
+
+    return Status{};
+}
+Status validate_arguments_static(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
+                                 const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_RETURN_ERROR_ON(block_shape_x < 1 || block_shape_y < 1);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+
+    // Validate output if initialized
+    if(output->total_size() != 0)
+    {
+        const DataLayout data_layout = input->data_layout();
+        const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+        const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+        const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+        const int        idx_batch   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+        ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] < padding_left.x() + padding_right.x());
+        ARM_COMPUTE_RETURN_ERROR_ON((input->tensor_shape()[idx_width] + padding_left.x() + padding_right.x()) % block_shape_x != 0);
+        ARM_COMPUTE_RETURN_ERROR_ON((input->tensor_shape()[idx_height] + padding_left.y() + padding_right.y()) % block_shape_y != 0);
+        ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]);
+        ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_batch] % (block_shape_x * block_shape_y) != 0);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    }
+
+    return Status{};
+}
+} // namespace
+
+NESpaceToBatchLayerKernel::NESpaceToBatchLayerKernel()
+    : _input(nullptr), _block_shape(nullptr), _paddings(nullptr), _output(nullptr), _padding_left(), _block_shape_x(), _block_shape_y()
+{
+}
+
+void NESpaceToBatchLayerKernel::configure(const ITensor *input, const ITensor *block_shape, const ITensor *paddings, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), block_shape->info(), paddings->info(), output->info()));
+
+    _input       = input;
+    _block_shape = block_shape;
+    _paddings    = paddings;
+    _output      = output;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output->info(), Steps());
+    INEKernel::configure(win);
+}
+
+void NESpaceToBatchLayerKernel::configure(const ITensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
+                                          ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    TensorShape output_shape = misc::shape_calculator::compute_space_to_batch_shape(input->info(), block_shape_x, block_shape_y, padding_left, padding_right);
+    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
+
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, padding_left, padding_right, output->info()));
+
+    _input         = input;
+    _output        = output;
+    _block_shape_x = block_shape_x;
+    _block_shape_y = block_shape_y;
+    _padding_left  = padding_left;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output->info(), Steps());
+    INEKernel::configure(win);
+}
+
+Status NESpaceToBatchLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, block_shape, paddings, output));
+    return Status{};
+}
+Status NESpaceToBatchLayerKernel::validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
+                                           const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, padding_left, padding_right, output));
+    return Status{};
+}
+
+void NESpaceToBatchLayerKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
+
+    if(_block_shape != nullptr)
+    {
+        // Retrieve the block shapes dynamically
+        _block_shape_x = *(reinterpret_cast<const int *>(_block_shape->ptr_to_element(0)));
+        _block_shape_y = *(reinterpret_cast<const int *>(_block_shape->ptr_to_element(1)));
+    }
+
+    if(_paddings != nullptr)
+    {
+        // The paddings tensor holds S32 values, so read the left paddings as 32-bit integers
+        const auto pad_left_x = *reinterpret_cast<const int32_t *>(_paddings->ptr_to_element({ 0, 0 }));
+        const auto pad_left_y = *reinterpret_cast<const int32_t *>(_paddings->ptr_to_element({ 1, 0 }));
+        _padding_left         = Size2D(pad_left_x, pad_left_y);
+    }
+    const DataLayout data_layout  = _input->info()->data_layout();
+    const int        height_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    const int        width_idx    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const int        element_size = _input->info()->element_size();
+
+    const size_t height     = _input->info()->dimension(height_idx);
+    const size_t width      = _input->info()->dimension(width_idx);
+    const size_t batch_size = _input->info()->dimension(3);
+
+    Window slice_out = window.first_slice_window_3D();
+
+    int batch_id = 0;
+
+    // Main loop for NCHW and NHWC
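+    // Each output element at (out_x, out_y) in output batch batch_id maps back to the input
+    // position pos_x = out_x * block_shape_x + (batch_id / batch_size) % block_shape_x,
+    // pos_y = out_y * block_shape_y + (batch_id / batch_size) / block_shape_x, shifted left by
+    // the padding. Positions that fall inside the padded border are skipped here and keep the
+    // value written by the memset pass of the runtime function.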
+    if(_output->info()->data_layout() == DataLayout::NCHW)
+    {
+        do
+        {
+            Iterator out(_output, slice_out);
+            execute_window_loop(slice_out, [&](const Coordinates & id)
+            {
+                const size_t out_x = id.x();
+                const size_t out_y = id.y();
+                const size_t z     = id.z();
+                const size_t pos_x = out_x * _block_shape_x + (batch_id / batch_size) % _block_shape_x;
+                const size_t pos_y = out_y * _block_shape_y + (batch_id / batch_size) / _block_shape_x;
+                if(pos_y >= _padding_left.y() && pos_y < _padding_left.y() + height && pos_x >= _padding_left.x() && pos_x < _padding_left.x() + width)
+                {
+                    const int   w    = batch_id % batch_size;
+                    const int   in_x = pos_x - _padding_left.x();
+                    const int   in_y = pos_y - _padding_left.y();
+                    Coordinates input_coords{ in_x, in_y, z, w };
+                    memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size);
+                }
+            },
+            out);
+            ++batch_id;
+        }
+        while(window.slide_window_slice_3D(slice_out));
+    }
+    else
+    {
+        do
+        {
+            Iterator out(_output, slice_out);
+            execute_window_loop(slice_out, [&](const Coordinates & id)
+            {
+                const size_t out_x = id.y();
+                const size_t out_y = id.z();
+                const size_t z     = id.x();
+                const size_t pos_x = out_x * _block_shape_x + (batch_id / batch_size) % _block_shape_x;
+                const size_t pos_y = out_y * _block_shape_y + (batch_id / batch_size) / _block_shape_x;
+                if(pos_y >= _padding_left.y() && pos_y < _padding_left.y() + height && pos_x >= _padding_left.x() && pos_x < _padding_left.x() + width)
+                {
+                    const int   w    = batch_id % batch_size;
+                    const int   in_x = pos_x - _padding_left.x();
+                    const int   in_y = pos_y - _padding_left.y();
+                    Coordinates input_coords{ z, in_x, in_y, w };
+                    memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size);
+                }
+            },
+            out);
+            ++batch_id;
+        }
+        while(window.slide_window_slice_3D(slice_out));
+    }
+}
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp b/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
new file mode 100644
index 0000000..46c28ad
--- /dev/null
+++ b/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+namespace arm_compute
+{
+NESpaceToBatchLayer::NESpaceToBatchLayer()
+    : _space_to_batch_kernel(), _memset_kernel(), _has_padding(false)
+{
+}
+
+void NESpaceToBatchLayer::configure(const ITensor *input, const ITensor *block_shape, const ITensor *paddings, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, block_shape, paddings, output);
+
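+    // If the padded output holds more elements than the input, the kernel leaves the padded
+    // region untouched, so zero-fill the whole output first with the memset kernel.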
+    if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
+    {
+        _has_padding = true;
+        _memset_kernel.configure(output, PixelValue());
+    }
+    _space_to_batch_kernel.configure(input, block_shape, paddings, output);
+}
+
+void NESpaceToBatchLayer::configure(const ITensor *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
+    {
+        _has_padding = true;
+        _memset_kernel.configure(output, PixelValue());
+    }
+    _space_to_batch_kernel.configure(input, block_shape_x, block_shape_y, padding_left, padding_right, output);
+}
+
+Status NESpaceToBatchLayer::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *paddings, const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(NESpaceToBatchLayerKernel::validate(input, block_shape, paddings, output));
+
+    return Status{};
+}
+
+Status NESpaceToBatchLayer::validate(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const Size2D &padding_left, const Size2D &padding_right,
+                                     const ITensorInfo *output)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(NESpaceToBatchLayerKernel::validate(input, block_shape_x, block_shape_y, padding_left, padding_right, output));
+
+    return Status{};
+}
+
+void NESpaceToBatchLayer::run()
+{
+    // Zero out output only if we have paddings
+    if(_has_padding)
+    {
+        NEScheduler::get().schedule(&_memset_kernel, Window::DimY);
+    }
+    NEScheduler::get().schedule(&_space_to_batch_kernel, Window::DimY);
+}
+} // namespace arm_compute
diff --git a/tests/validation/NEON/SpaceToBatchLayer.cpp b/tests/validation/NEON/SpaceToBatchLayer.cpp
new file mode 100644
index 0000000..1d5ef06
--- /dev/null
+++ b/tests/validation/NEON/SpaceToBatchLayer.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/SpaceToBatchDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/SpaceToBatchFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(SpaceToBatchLayer)
+
+template <typename T>
+using NESpaceToBatchLayerFixture = SpaceToBatchLayerValidationFixture<Tensor, Accessor, NESpaceToBatchLayer, T>;
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),    // Mismatching data types
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),    // Wrong data type block shape
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 2U, 4U), 1, DataType::F32),    // Wrong tensor shape
+                                                     }),
+               framework::dataset::make("BlockShapeInfo",{ TensorInfo(TensorShape(2U), 1, DataType::S32),
+                                                       TensorInfo(TensorShape(2U), 1, DataType::S32),
+                                                       TensorInfo(TensorShape(2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(2U), 1, DataType::S32),
+                                                     })),
+               framework::dataset::make("PaddingsShapeInfo",{ TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
+                                                       TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
+                                                       TensorInfo(TensorShape(2U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
+                                                     })),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
+                                                     })),
+               framework::dataset::make("Expected", { true, false, false, false})),
+               input_info, block_shape_info, paddings_info, output_info, expected)
+{
+    bool has_error = bool(NESpaceToBatchLayer::validate(&input_info.clone()->set_is_resizable(false), &block_shape_info.clone()->set_is_resizable(false), &paddings_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false)));
+    ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
+}
+DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),    // Mismatching data types
+                                                       TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),    // Negative block shapes
+                                                       TensorInfo(TensorShape(32U, 16U, 2U, 1U, 4U), 1, DataType::F32), // Wrong tensor shape
+                                                       TensorInfo(TensorShape(32U, 16U, 2U, 1U, 4U), 1, DataType::F32), // Wrong paddings
+                                                     }),
+               framework::dataset::make("BlockShapeX", { 2, 2, 2, 2, 2 })),
+               framework::dataset::make("BlockShapeY", { 2, 2, -2, 2, 2 })),
+               framework::dataset::make("PadLeft", { Size2D(0, 0), Size2D(0, 0), Size2D(0, 0), Size2D(0, 0), Size2D(3, 11) })),
+               framework::dataset::make("PadRight", { Size2D(0, 0), Size2D(0, 0), Size2D(0, 0), Size2D(0, 0), Size2D(3, 11) })),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 8U, 2U, 4U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(32U, 8U, 2U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 8U, 2U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 8U, 2U, 4U), 1, DataType::F32),
+                                                     })),
+               framework::dataset::make("Expected", { true, false, false, false, false})),
+               input_info, block_shape_x, block_shape_y, padding_left, padding_right, output_info, expected)
+{
+    bool has_error = bool(NESpaceToBatchLayer::validate(&input_info.clone()->set_is_resizable(false), block_shape_x, block_shape_y, padding_left, padding_right, &output_info.clone()->set_is_resizable(false)));
+    ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+                                                                                                                    DataType::F32)),
+                                                                                                            framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+                                                                                                                  DataType::F32)),
+                                                                                                          framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+                                                                                                                   DataType::F16)),
+                                                                                                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+                                                                                                                 DataType::F16)),
+                                                                                                         framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // SpaceToBatchLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute