COMPMID-2051 Refactor shape_calculator::calculate_concatenate_shape
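
Collapse calculate_depth_concatenate_shape() into the generic
calculate_concatenate_shape(shapes, axis) and use the latter everywhere:
in the graph ConcatenateLayerNode, the CL/NEON concatenate functions and
the CL/NEON depth concatenate functions. The depth concatenate kernels
(CL, GLES, NEON) no longer centre a smaller input inside the output, so
the left_right/top_bottom offsets, the border_size() overrides and the
related padding checks are removed, and the input X/Y dimensions are now
required to match the output. The OpenCL kernel takes a single byte
offset into the output instead of an int3, the GLES shader loads the
current item directly, and a new GCConcatenateLayer function is added
for depth concatenation on GLES. The concatenate validate() paths now
validate against the user-provided output info and, when the output is
already initialized, check it against the computed concatenation shape.

Callers that previously special-cased the depth axis now use, for
example:

    TensorShape output_shape =
        arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimZ);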

Change-Id: Ibf316718d11fa975d75f226925747b21c4efd127
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/974
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
diff --git a/src/core/CL/cl_kernels/concatenate.cl b/src/core/CL/cl_kernels/concatenate.cl
index 99f4659..23ebcf9 100644
--- a/src/core/CL/cl_kernels/concatenate.cl
+++ b/src/core/CL/cl_kernels/concatenate.cl
@@ -409,19 +409,19 @@
 __kernel void concatenate_depth(
     TENSOR3D_DECLARATION(src),
     TENSOR3D_DECLARATION(dst),
-    int3 offsets)
+    int offset)
 {
     Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
     Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
 
     VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    source_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&src, -offsets.x, -offsets.y, 0));
+    source_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr);
 
 #if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
     source_values = requantize(source_values, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT);
 #endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
 
     VSTORE(VEC_SIZE)
-    (source_values, 0, (__global DATA_TYPE *)(dst.ptr + offsets.z));
+    (source_values, 0, (__global DATA_TYPE *)(dst.ptr + offset));
 }
 #endif /* defined(DATA_TYPE) && defined(VEC_SIZE) */
diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 3fccc04..1cae371 100644
--- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,19 +47,13 @@
 {
     ARM_COMPUTE_UNUSED(depth_offset);
 
-    // Configure kernel window
-    const int left_right = (output->dimension(0) - input->dimension(0)) / 2;
-    const int top_bottom = (output->dimension(1) - input->dimension(1)) / 2;
-
     const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
-    const unsigned int num_elems_read_per_iteration      = 16 / input->element_size();
-    const unsigned int num_rows_read_per_iteration       = 1;
 
     // The window needs to be based on input as we copy all the depths of input
     Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
     win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1));
 
-    AccessWindowRectangle  input_access(input, -left_right, -top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration);
+    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
     AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
     bool                   window_changed = update_window_and_padding(win, input_access, output_access);
     output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
@@ -74,30 +68,20 @@
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
 
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimX) != output->dimension(Window::DimX));
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimY) != output->dimension(Window::DimY));
     ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) + depth_offset > output->dimension(2));
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) > output->dimension(0));
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) > output->dimension(1));
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, input, output);
 
-    // The gaps between the two lowest dimensions of input and output need to be divisible by 2
-    // Otherwise it is not clear how the padding should be added onto the input tensor
-    ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) - input->dimension(0)) % 2);
-    ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(1) - input->dimension(1)) % 2);
-
     return Status{};
 }
 } // namespace
 
 CLDepthConcatenateLayerKernel::CLDepthConcatenateLayerKernel()
-    : _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0)
+    : _input(nullptr), _output(nullptr), _depth_offset(0)
 {
 }
 
-BorderSize CLDepthConcatenateLayerKernel::border_size() const
-{
-    return BorderSize(_top_bottom, _left_right);
-}
-
 void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int depth_offset, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -125,10 +109,6 @@
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("concatenate_depth", build_opts.options()));
 
     // Configure kernel window
-    _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2;
-    _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2;
-
-    // Configure kernel window
     auto win_config = validate_and_configure_window(input->info(), depth_offset, output->info());
     ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
 
@@ -153,16 +133,8 @@
 
     const int offset_to_first_elements_in_bytes = _depth_offset * _output->info()->strides_in_bytes()[2];
 
-    unsigned int  idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
-    const cl_int3 offsets =
-    {
-        {
-            static_cast<cl_int>(_left_right),
-            static_cast<cl_int>(_top_bottom),
-            static_cast<cl_int>(offset_to_first_elements_in_bytes),
-        }
-    };
-    _kernel.setArg<cl_int3>(idx, offsets);
+    unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
+    _kernel.setArg<cl_int>(idx, offset_to_first_elements_in_bytes);
 
     do
     {
diff --git a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs b/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs
index 69ac50b..49b3954 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -53,7 +53,7 @@
     Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
     Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
 
-    float tmp = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, -OFFSET_X, -OFFSET_Y, 0));
+    float tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
     STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp);
 }
 
@@ -66,7 +66,7 @@
     Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
     Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
 
-    uvec2 tmp = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, -OFFSET_X, -OFFSET_Y, 0));
+    uvec2 tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
     STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp);
 }
 #endif /*DATA_TYPE_FP16*/
diff --git a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
index 36d1b29..6f70efe 100644
--- a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,28 +38,18 @@
 using namespace arm_compute;
 
 GCDepthConcatenateLayerKernel::GCDepthConcatenateLayerKernel()
-    : _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0)
+    : _input(nullptr), _output(nullptr), _depth_offset(0)
 {
 }
-
-BorderSize GCDepthConcatenateLayerKernel::border_size() const
-{
-    return BorderSize(_top_bottom, _left_right);
-}
-
 void GCDepthConcatenateLayerKernel::configure(const IGCTensor *input, unsigned int depth_offset, IGCTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) + depth_offset > output->info()->dimension(2));
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) > output->info()->dimension(0));
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) > output->info()->dimension(1));
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(3, input, output);
 
-    // The gaps between the two lowest dimensions of input and output need to be divisible by 2
-    // Otherwise it is not clear how the padding should be added onto the input tensor
-    ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) - input->info()->dimension(0)) % 2);
-    ARM_COMPUTE_ERROR_ON((output->info()->dimension(1) - input->info()->dimension(1)) % 2);
+    ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimX) != output->info()->dimension(Window::DimX));
+    ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimY) != output->info()->dimension(Window::DimY));
+    ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) + depth_offset > output->info()->dimension(2));
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(3, input, output);
 
     _input        = input;
     _output       = output;
@@ -73,35 +63,20 @@
     build_opts.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(1));
     build_opts.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(1));
 
-    // Configure kernel window
-    _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2;
-    _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2;
-
-    build_opts.emplace("#define OFFSET_X " + support::cpp11::to_string(_left_right));
-    build_opts.emplace("#define OFFSET_Y " + support::cpp11::to_string(_top_bottom));
-
     // Create kernel
     _kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel("concatenate_depth", build_opts));
 
     unsigned int num_elems_processed_per_iteration = 1;
-    unsigned int num_elems_read_per_iteration      = 1;
-    if(input->info()->data_type() == DataType::F32)
-    {
-        num_elems_processed_per_iteration = 1;
-        num_elems_read_per_iteration      = 1;
-    }
-    else if(input->info()->data_type() == DataType::F16)
+    if(input->info()->data_type() == DataType::F16)
     {
         num_elems_processed_per_iteration = 4;
-        num_elems_read_per_iteration      = 4;
     }
-    const unsigned int num_rows_read_per_iteration = 1;
 
     // The window needs to be based on input as we copy all the depths of input
     Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
     win.set(Window::DimZ, Window::Dimension(0, input->info()->tensor_shape().z(), 1));
 
-    AccessWindowRectangle  input_access(input->info(), -_left_right, -_top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration);
+    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
     AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
     update_window_and_padding(win, input_access, output_access);
     output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
@@ -118,11 +93,9 @@
 
     _output->set_needs_shifting(true);
 
-    Window slice     = window.first_slice_window_3D();
     Window slice_in  = window.first_slice_window_3D();
     Window slice_out = window.first_slice_window_3D();
 
-    slice.shift(Window::DimX, -(_output->info()->padding()).left);
     slice_out.set(Window::DimZ, Window::Dimension(_depth_offset));
 
     do
@@ -133,7 +106,7 @@
 
         _kernel.update_shader_params();
 
-        enqueue(*this, slice);
+        enqueue(*this, slice_in);
     }
-    while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_in));
+    while(window.slide_window_slice_3D(slice_in));
 }
diff --git a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
index 8352c94..b360e9e 100644
--- a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
@@ -42,18 +42,13 @@
 namespace
 {
 template <typename T>
-void depth_concat(const ITensor *in, ITensor *out, std::pair<int, int> start_xy, int depth_offset, const Window &window)
+void depth_concat(const ITensor *in, ITensor *out, int depth_offset, const Window &window)
 {
-    const int start_x = start_xy.first;
-    const int start_y = start_xy.second;
-
     // Offset input
-    const int input_offset_to_first_elements_in_bytes = in->info()->offset_first_element_in_bytes() - start_x * in->info()->strides_in_bytes()[0] - start_y * in->info()->strides_in_bytes()[1];
-    uint8_t *input_ptr                               = in->buffer() + input_offset_to_first_elements_in_bytes;
+    uint8_t *input_ptr = in->buffer() + in->info()->offset_first_element_in_bytes();
 
     // Offset output
-    const unsigned int output_offset_to_first_elements_in_bytes = out->info()->offset_first_element_in_bytes() + depth_offset * out->info()->strides_in_bytes()[2];
-    uint8_t           *output_ptr                               = out->buffer() + output_offset_to_first_elements_in_bytes;
+    uint8_t *output_ptr = out->buffer() + out->info()->offset_first_element_in_bytes() + depth_offset * out->info()->strides_in_bytes()[2];
 
     Iterator input(in, window);
     Iterator output(out, window);
@@ -88,19 +83,13 @@
 {
     ARM_COMPUTE_UNUSED(depth_offset);
 
-    // Configure kernel window
-    const int left_right = (output->dimension(0) - input->dimension(0)) / 2;
-    const int top_bottom = (output->dimension(1) - input->dimension(1)) / 2;
-
     const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
-    const unsigned int num_elems_read_per_iteration      = 16 / input->element_size();
-    const unsigned int num_rows_read_per_iteration       = 1;
 
     // The window needs to be based on input as we copy all the depths of input
     Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
     win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1));
 
-    AccessWindowRectangle  input_access(input, -left_right, -top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration);
+    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
     AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
     bool                   window_changed = update_window_and_padding(win, input_access, output_access);
     output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
@@ -116,30 +105,20 @@
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
 
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimX) != output->dimension(Window::DimX));
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimY) != output->dimension(Window::DimY));
     ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) + depth_offset > output->dimension(2));
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) > output->dimension(0));
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) > output->dimension(1));
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, input, output);
 
-    // The gaps between the two lowest dimensions of input and output need to be divisible by 2
-    // Otherwise it is not clear how the padding should be added onto the input tensor
-    ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) - input->dimension(0)) % 2);
-    ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(1) - input->dimension(1)) % 2);
-
     return Status{};
 }
 } // namespace
 
 NEDepthConcatenateLayerKernel::NEDepthConcatenateLayerKernel()
-    : _func(nullptr), _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0)
+    : _func(nullptr), _input(nullptr), _output(nullptr), _depth_offset(0)
 {
 }
 
-BorderSize NEDepthConcatenateLayerKernel::border_size() const
-{
-    return BorderSize(_top_bottom, _left_right);
-}
-
 void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int depth_offset, ITensor *output)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -149,8 +128,6 @@
     _input        = input;
     _output       = output;
     _depth_offset = depth_offset;
-    _left_right   = (output->info()->dimension(0) - input->info()->dimension(0)) / 2;
-    _top_bottom   = (output->info()->dimension(1) - input->info()->dimension(1)) / 2;
 
     switch(input->info()->data_type())
     {
@@ -190,5 +167,5 @@
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
     ARM_COMPUTE_ERROR_ON(_func == nullptr);
 
-    (*_func)(_input, _output, std::make_pair(_left_right, _top_bottom), _depth_offset, window);
+    (*_func)(_input, _output, _depth_offset, window);
 }
diff --git a/src/graph/nodes/ConcatenateLayerNode.cpp b/src/graph/nodes/ConcatenateLayerNode.cpp
index 48da8b6..ff515c4 100644
--- a/src/graph/nodes/ConcatenateLayerNode.cpp
+++ b/src/graph/nodes/ConcatenateLayerNode.cpp
@@ -68,6 +68,7 @@
 
     TensorDescriptor output_descriptor = input_descriptors[0];
     const int        axis_idx          = get_dimension_idx(output_descriptor.layout, axis);
+    ARM_COMPUTE_ERROR_ON_MSG(axis_idx > 2, "Unsupported concatenation axis!");
 
     // Extract shapes
     std::vector<const TensorShape *> shapes;
@@ -76,18 +77,7 @@
         shapes.emplace_back(&input_descriptor.shape);
     }
 
-    if(axis_idx < 2)
-    {
-        output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(shapes, axis_idx);
-    }
-    else if(axis_idx == 2)
-    {
-        output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(shapes);
-    }
-    else
-    {
-        ARM_COMPUTE_ERROR("Unsupported concatenation axis!");
-    }
+    output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(shapes, axis_idx);
 
     return output_descriptor;
 }
diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp b/src/runtime/CL/functions/CLConcatenateLayer.cpp
index b9b3c5b..b8224d2 100644
--- a/src/runtime/CL/functions/CLConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp
@@ -56,15 +56,7 @@
         ARM_COMPUTE_ERROR_ON_NULLPTR(t);
         return t->info();
     });
-    TensorShape output_shape{};
-    if(_axis == Window::DimZ)
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
-    }
-    else
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
-    }
+    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
 
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
@@ -143,19 +135,6 @@
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
     ARM_COMPUTE_RETURN_ERROR_ON(num_inputs < 2);
 
-    // Output auto inizialitation if not yet initialized
-    TensorInfo  tmp_output_info = *output->clone();
-    TensorShape output_shape{};
-    if(axis == Window::DimZ)
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
-    }
-    else
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
-    }
-    auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
-
     unsigned int offset = 0;
     switch(axis)
     {
@@ -166,19 +145,19 @@
                 case 2:
                     // Validate WidthConcatenate2Tensors kernels if there are 2 inputs
                     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1]);
-                    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(inputs_vector[0], inputs_vector[1], &tmp_output_info));
+                    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(inputs_vector[0], inputs_vector[1], output));
                     break;
                 case 4:
                     // Validate WidthConcatenate4Tensors kernels if there are 4 inputs
                     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3]);
-                    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate4TensorsKernel::validate(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3], &tmp_output_info));
+                    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate4TensorsKernel::validate(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3], output));
                     break;
                 default:
                     // Validate generic case of WidthConcatenate kernel
                     for(const auto &input : inputs_vector)
                     {
                         ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
-                        ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                        ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, output));
                         offset += input->dimension(axis);
                     }
                     break;
@@ -189,7 +168,7 @@
         {
             for(const auto &input : inputs_vector)
             {
-                ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, output));
                 offset += input->dimension(axis);
             }
             break;
@@ -198,7 +177,7 @@
         {
             for(const auto &input : inputs_vector)
             {
-                ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, output));
                 offset += input->dimension(axis);
             }
             break;
@@ -207,6 +186,12 @@
             ARM_COMPUTE_ERROR("Axis not supported");
     }
 
+    if(output->total_size() != 0)
+    {
+        TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
+        ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size());
+    }
+
     return Status{};
 }
 
diff --git a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
index e46647a..4a5f845 100644
--- a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
@@ -56,7 +56,7 @@
     _concat_kernels_vector  = arm_compute::support::cpp14::make_unique<CLDepthConcatenateLayerKernel[]>(_num_inputs);
     _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_inputs);
 
-    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector_info);
+    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ);
 
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
@@ -82,7 +82,7 @@
 
     // Output auto inizialitation if not yet initialized
     TensorInfo  tmp_output_info = *output->clone();
-    TensorShape output_shape    = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
+    TensorShape output_shape    = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimZ);
     auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
 
     unsigned int depth_offset = 0;
diff --git a/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp
new file mode 100644
index 0000000..506f648
--- /dev/null
+++ b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h"
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+GCConcatenateLayer::GCConcatenateLayer()
+    : _concat_kernels(),
+      _num_inputs(0),
+      _axis(Window::DimZ)
+{
+}
+
+void GCConcatenateLayer::configure(std::vector<IGCTensor *> inputs_vector, IGCTensor *output, size_t axis)
+{
+    ARM_COMPUTE_ERROR_ON(inputs_vector.size() < 2);
+
+    _num_inputs = inputs_vector.size();
+    _axis       = axis;
+
+    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
+
+    // Output auto inizialitation if not yet initialized
+    auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
+
+    unsigned int offset = 0;
+    switch(axis)
+    {
+        case Window::DimZ:
+        {
+            for(unsigned int i = 0; i < _num_inputs; ++i)
+            {
+                auto kernel = support::cpp14::make_unique<GCDepthConcatenateLayerKernel>();
+                kernel->configure(inputs_vector.at(i), offset, output);
+                offset += inputs_vector.at(i)->info()->dimension(axis);
+                _concat_kernels.emplace_back(std::move(kernel));
+            }
+            break;
+        }
+        default:
+            ARM_COMPUTE_ERROR("Axis not supported");
+    }
+}
+
+void GCConcatenateLayer::run()
+{
+    for(auto &kernel : _concat_kernels)
+    {
+        GCScheduler::get().dispatch(*kernel, true);
+    }
+}
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index e02c0c2..b8cfa2b 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -56,15 +56,7 @@
         ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i));
         inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
     }
-    TensorShape output_shape{};
-    if(_axis == Window::DimZ)
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
-    }
-    else
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
-    }
+    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
 
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
@@ -109,19 +101,6 @@
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
     ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
 
-    // Output auto inizialitation if not yet initialized
-    TensorInfo  tmp_output_info = *output->clone();
-    TensorShape output_shape{};
-    if(axis == Window::DimZ)
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
-    }
-    else
-    {
-        output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
-    }
-    auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
-
     unsigned int offset = 0;
     for(const auto &input : inputs_vector)
     {
@@ -130,17 +109,17 @@
         {
             case Window::DimX:
             {
-                ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, output));
                 break;
             }
             case Window::DimY:
             {
-                ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, output));
                 break;
             }
             case Window::DimZ:
             {
-                ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, output));
                 break;
             }
             default:
@@ -149,6 +128,12 @@
         offset += input->dimension(axis);
     }
 
+    if(output->total_size() != 0)
+    {
+        TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
+        ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size());
+    }
+
     return Status{};
 }
 
diff --git a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
index 49db855..b814bff 100644
--- a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -54,7 +54,7 @@
     {
         inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
     }
-    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector_info);
+    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ);
 
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
@@ -80,7 +80,7 @@
 
     // Output auto inizialitation if not yet initialized
     TensorInfo  tmp_output_info = *output->clone();
-    TensorShape output_shape    = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
+    TensorShape output_shape    = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimZ);
     auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
 
     unsigned int depth_offset = 0;