COMPMID-2109: Remove CL/NE Width/Depth ConcatenateLayer functions.
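
The deprecated per-axis functions (CLWidthConcatenateLayer, CLDepthConcatenateLayer,
NEWidthConcatenateLayer, NEDepthConcatenateLayer and GCDepthConcatenateLayer) are
removed in favour of the generic CLConcatenateLayer, NEConcatenateLayer and
GCConcatenateLayer; the CL and NEON variants take the concatenation axis as an
argument and dispatch to the corresponding width/height/depth kernels internally.
As a rough migration sketch (tensor names are illustrative, not part of this patch),
a caller of the removed NEON width variant switches to the generic function with
axis 0:

    // input0, input1 and output are pre-configured tensors (illustrative names).
    std::vector<ITensor *> inputs_vector = { &input0, &input1 };
    NEConcatenateLayer     concat;
    concat.configure(inputs_vector, &output, 0); // axis 0 -> NEWidthConcatenateLayerKernel
    concat.run();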

Change-Id: Icbda771abffbb45d4ed0958933c60ff9ace01314
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1178
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index e314f44..fbaab35 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -53,7 +53,6 @@
 #include "arm_compute/runtime/CL/functions/CLCropResize.h"
 #include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"
 #include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h"
 #include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
 #include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"
 #include "arm_compute/runtime/CL/functions/CLDepthwiseSeparableConvolutionLayer.h"
@@ -143,7 +142,6 @@
 #include "arm_compute/runtime/CL/functions/CLUpsampleLayer.h"
 #include "arm_compute/runtime/CL/functions/CLWarpAffine.h"
 #include "arm_compute/runtime/CL/functions/CLWarpPerspective.h"
-#include "arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h"
 #include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
 #include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
 #include "arm_compute/runtime/CL/functions/CLYOLOLayer.h"
diff --git a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
index d85a445..c56fc11 100644
--- a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
@@ -26,7 +26,7 @@
 
 #include "arm_compute/runtime/IFunction.h"
 
-#include "arm_compute/core/CL/kernels/CLHeightConcatenateLayerKernel.h"
+#include "arm_compute/core/CL/ICLKernel.h"
 #include "arm_compute/core/Types.h"
 
 #include <memory>
@@ -41,9 +41,9 @@
 
 /** Basic function to execute concatenate tensors along a given axis. This function calls the following kernels:
  *
- * -# @ref CLWidthConcatenateLayer (if underlying concatenation axis is 0).
+ * -# @ref CLWidthConcatenateLayerKernel (if underlying concatenation axis is 0).
  * -# @ref CLHeightConcatenateLayerKernel (if underlying concatenation axis is 1).
- * -# @ref CLDepthConcatenateLayer (if underlying concatenation axis is 2).
+ * -# @ref CLDepthConcatenateLayerKernel (if underlying concatenation axis is 2).
  */
 class CLConcatenateLayer : public IFunction
 {
@@ -53,7 +53,7 @@
     /** Initialise the kernel's inputs vector and output.
      *
      * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
-     * @note Preconditions can be found respectively at @ref CLWidthConcatenateLayer, @ref CLHeightConcatenateLayerKernel and @ref CLDepthConcatenateLayer.
+     * @note Preconditions can be found respectively at @ref CLWidthConcatenateLayerKernel, @ref CLHeightConcatenateLayerKernel and @ref CLDepthConcatenateLayerKernel.
      *
      * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
      * @param[out]    output        Output tensor. Data types supported: Same as @p input.
@@ -63,7 +63,7 @@
     /** Static function to check if given info will lead to a valid configuration of @ref CLConcatenateLayer
      *
      * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
-     * @note Preconditions can be found respectively at @ref CLWidthConcatenateLayer, @ref CLHeightConcatenateLayerKernel and @ref CLDepthConcatenateLayer.
+     * @note Preconditions can be found respectively at @ref CLWidthConcatenateLayerKernel, @ref CLHeightConcatenateLayerKernel and @ref CLDepthConcatenateLayerKernel.
      *
      * @param[in] inputs_vector The vectors containing all the tensors info to concatenate. Data types supported: QASYMM8/F16/F32.
      * @param[in] output        Output tensor info. Data types supported: Same as @p input.
diff --git a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
deleted file mode 100644
index 9ef21f3..0000000
--- a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLDEPTHCONCATENATE_H__
-#define __ARM_COMPUTE_CLDEPTHCONCATENATE_H__
-
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h"
-#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
-
-#include <memory>
-#include <vector>
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
- *
- * @deprecated This function is deprecated and will be removed in release 19.08
- *
- * -# @ref CLFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
- * -# @ref CLDepthConcatenateLayerKernel
- *
- */
-class CLDepthConcatenateLayer : public IFunction
-{
-public:
-    /** Default constructor */
-    CLDepthConcatenateLayer();
-    /** Initialise the kernel's inputs vector and output.
-     *
-     * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
-     *                              Input dimensions might differ for each input for the first three dimensions (width, height, depth)
-     *                              and must match for the rest.
-     *                              Note that the difference between the minimum and maximum width and height among the input tensors
-     *                              must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
-     *                              height when they are less than the maximum input sizes.
-     * @param[out]    output        Output tensor. Data types supported: Same as @p input.
-     *                              Output tensor dimensions match the inputs' ones from the fourth dimension and above,
-     *                              while width and height are the maximum width and height of the input tensors.
-     *                              Finally, depth is the sum of the input depths.
-     */
-    void configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConcatenateLayer
-     *
-     * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
-     *                          Input dimensions might differ for each input for the first three dimensions (width, height, depth)
-     *                          and must match for the rest.
-     *                          Note that the difference between the minimum and maximum width and height among the input tensors
-     *                          must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
-     *                          height when they are less than the maximum input sizes.
-     * @param[in] output        Output tensor. Data types supported: Same as @p input.
-     *                          Output tensor dimensions match the inputs' ones from the fourth dimension and above,
-     *                          while width and height are the maximum width and height of the input tensors.
-     *                          Finally, depth is the sum of the input depths.
-     *
-     * @return a status
-     */
-    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run() override;
-
-private:
-    std::vector<CLDepthConcatenateLayerKernel> _concat_kernels_vector;
-    std::vector<CLFillBorderKernel>            _border_handlers_vector;
-    unsigned int                               _num_inputs;
-};
-}
-#endif /* __ARM_COMPUTE_CLDEPTHCONCATENATE_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLLSTMLayer.h b/arm_compute/runtime/CL/functions/CLLSTMLayer.h
index 8bd47cb..3add152 100644
--- a/arm_compute/runtime/CL/functions/CLLSTMLayer.h
+++ b/arm_compute/runtime/CL/functions/CLLSTMLayer.h
@@ -35,10 +35,10 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"
 #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
 #include "arm_compute/runtime/CL/functions/CLGEMM.h"
-#include "arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h"
 #include "arm_compute/runtime/IMemoryManager.h"
 #include "arm_compute/runtime/common/LSTMParams.h"
 
@@ -184,7 +184,7 @@
     CLActivationLayerKernel              _projection_clip;
     CLCopyKernel                         _copy_cell_state;
     CLCopyKernel                         _copy_output;
-    CLWidthConcatenateLayer              _concat_scratch_buffer;
+    CLConcatenateLayer                   _concat_scratch_buffer;
     CLWidthConcatenate2TensorsKernel     _concat_inputs_forget_gate;
     CLWidthConcatenate2TensorsKernel     _concat_weights_forget_gate;
     CLWidthConcatenate2TensorsKernel     _concat_weights_input_gate;
diff --git a/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
deleted file mode 100644
index 6a30fcf..0000000
--- a/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLWIDTHCONCATENATELAYER_H__
-#define __ARM_COMPUTE_CLWIDTHCONCATENATELAYER_H__
-
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/core/CL/kernels/CLWidthConcatenate2TensorsKernel.h"
-#include "arm_compute/core/CL/kernels/CLWidthConcatenate4TensorsKernel.h"
-#include "arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h"
-
-#include <memory>
-#include <vector>
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Basic function to execute concatenate tensors along x axis. This function calls the following kernel:
- *
- * @deprecated This function is deprecated and will be removed in release 19.08
- *
- * -# @ref CLWidthConcatenateLayerKernel
- * -# @ref CLWidthConcatenate2TensorsKernel (if there are exactly 2 input tensors)
- * -# @ref CLWidthConcatenate4TensorsKernel (if there are exactly 4 input tensors)
- *
- */
-class CLWidthConcatenateLayer : public IFunction
-{
-public:
-    /** Default constructor */
-    CLWidthConcatenateLayer();
-    /** Initialise the kernel's inputs vector and output.
-     *
-     * @param[in]  inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
-     *                           Dimensions of all the inputs should match apart for the width which can differ.
-     * @param[out] output        Output tensor. Data types supported: Same as @p input.
-     *                           Output tensor dimensions are the same with the inputs from the second dimension and above.
-     *                           The first dimension (width) is the sum of the input tensors' widths.
-     */
-    void configure(std::vector<ICLTensor *> inputs_vector, ICLTensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConcatenateLayerKernel
-     *
-     * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
-     *                          Dimensions of all the inputs should match apart for the width which can differ.
-     * @param[in] output        Output tensor. Data types supported: Same as @p input.
-     *                          Output tensor dimensions are the same with the inputs from the second dimension and above.
-     *                          The first dimension (width) is the sum of the input tensors' widths.
-     *
-     * @return a status
-     */
-    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run() override;
-
-private:
-    std::vector<CLWidthConcatenateLayerKernel> _concat_kernels_vector;
-    CLWidthConcatenate2TensorsKernel           _concat_x2_kernel;
-    CLWidthConcatenate4TensorsKernel           _concat_x4_kernel;
-    unsigned int                               _num_inputs;
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_CLWIDTHCONCATENATELAYER_H__ */
diff --git a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h
index 7e01480..6727530 100644
--- a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h
+++ b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h
@@ -31,7 +31,6 @@
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCBatchNormalizationLayer.h"
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h"
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h"
-#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h"
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h"
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.h"
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDropoutLayer.h"
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h
deleted file mode 100644
index da00f38..0000000
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_GCDEPTHCONCATENATE_H__
-#define __ARM_COMPUTE_GCDEPTHCONCATENATE_H__
-
-#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/IFunction.h"
-
-#include <memory>
-#include <vector>
-
-namespace arm_compute
-{
-class IGCTensor;
-
-/** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
- *
- * @deprecated This function is deprecated and will be removed in release 19.08
- * -# @ref GCFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
- * -# @ref GCDepthConcatenateLayerKernel
- *
- */
-class GCDepthConcatenateLayer : public IFunction
-{
-public:
-    /** Default constructor */
-    GCDepthConcatenateLayer();
-    /** Initialise the kernel's inputs vector and output.
-     *
-     * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: F16/F32.
-     * @param[out]    output        Output tensor. Data types supported: Same as @p input.
-     */
-    void configure(std::vector<IGCTensor *> inputs_vector, IGCTensor *output);
-
-    // Inherited methods overridden:
-    void run() override;
-
-private:
-    std::vector<std::unique_ptr<GCDepthConcatenateLayerKernel>> _concat_kernels_vector;
-    std::vector<std::unique_ptr<GCFillBorderKernel>>            _border_handlers_vector;
-    unsigned int                                                _num_inputs;
-};
-}
-#endif /* __ARM_COMPUTE_GCDEPTHCONCATENATE_H__ */
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index d84422f..0d94ea7 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -51,7 +51,6 @@
 #include "arm_compute/runtime/NEON/functions/NECopy.h"
 #include "arm_compute/runtime/NEON/functions/NECropResize.h"
 #include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEDepthwiseSeparableConvolutionLayer.h"
@@ -142,7 +141,6 @@
 #include "arm_compute/runtime/NEON/functions/NEUpsampleLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEWarpAffine.h"
 #include "arm_compute/runtime/NEON/functions/NEWarpPerspective.h"
-#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEYOLOLayer.h"
 
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
index f8cda32..8c97efc 100644
--- a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -26,8 +26,9 @@
 
 #include "arm_compute/runtime/IFunction.h"
 
-#include "arm_compute/core/NEON/kernels/NEHeightConcatenateLayerKernel.h"
+#include "arm_compute/core/NEON/INEKernel.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/Requires.h"
 
 #include <memory>
 #include <vector>
@@ -41,9 +42,9 @@
 
 /** Basic function to execute concatenate tensors along a given axis. This function calls the following kernels:
  *
- * -# @ref NEWidthConcatenateLayer (if underlying concatenation axis is 0).
+ * -# @ref NEWidthConcatenateLayerKernel (if underlying concatenation axis is 0).
  * -# @ref NEHeightConcatenateLayerKernel (if underlying concatenation axis is 1).
- * -# @ref NEDepthConcatenateLayer (if underlying concatenation axis is 2).
+ * -# @ref NEDepthConcatenateLayerKernel (if underlying concatenation axis is 2).
  */
 class NEConcatenateLayer : public IFunction
 {
@@ -53,17 +54,18 @@
     /** Initialise the kernel's inputs vector and output.
      *
      * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
-     * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayer.
+     * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayerKernel, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayerKernel.
      *
      * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
      * @param[out]    output        Output tensor. Data types supported: Same as @p input.
      * @param[in]     axis          Concatenation axis. Supported underlying concatenation axis are 0, 1 and 2.
      */
-    void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, size_t axis);
+    void configure(std::vector<ITensor *> inputs_vector, ITensor *output, size_t axis);
+    void configure(std::vector<const ITensor *> inputs_vector, ITensor *output, size_t axis);
     /** Static function to check if given info will lead to a valid configuration of @ref NEConcatenateLayer
      *
      * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
-     * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayer.
+     * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayerKernel, @ref NEHeightConcatenateLayerKernel and @ref NEDepthConcatenateLayerKernel.
      *
      * @param[in] inputs_vector The vectors containing all the tensors info to concatenate. Data types supported: QASYMM8/F16/F32.
      * @param[in] output        Output tensor info. Data types supported: Same as @p input.
@@ -72,11 +74,19 @@
      * @return a status
      */
     static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
+    static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
 
     // Inherited methods overridden:
     void run() override;
 
 private:
+    template <typename TensorType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorType>::type, ITensor>::value)>
+    void configure_internal(std::vector<TensorType *> &&inputs_vector, ITensor *output, size_t axis);
+
+    template <typename TensorInfoType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorInfoType>::type, ITensorInfo>::value)>
+    static Status validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output, size_t axis);
+
+private:
     std::vector<std::unique_ptr<INEKernel>> _concat_kernels;
     unsigned int                            _num_inputs;
     unsigned int                            _axis;
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
deleted file mode 100644
index b3bf752..0000000
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2017-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEDEPTHCONCATENATE_H__
-#define __ARM_COMPUTE_NEDEPTHCONCATENATE_H__
-
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h"
-#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
-
-#include <memory>
-#include <vector>
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
- *
- * -# @ref NEFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
- * -# @ref NEDepthConcatenateLayerKernel
- *
- * @deprecated This function is deprecated and will be removed in release 19.08
- *
- */
-class NEDepthConcatenateLayer : public IFunction
-{
-public:
-    /** Default constructor */
-    NEDepthConcatenateLayer();
-    /** Initialise the kernel's inputs vector and output.
-     *
-     * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
-     *                              Input dimensions might differ for each input for the first three dimensions (width, height, depth)
-     *                              and must match for the rest.
-     *                              Note that the difference between the minimum and maximum width and height among the input tensors
-     *                              must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
-     *                              height when they are less than the maximum input sizes.
-     * @param[out]    output        Output tensor. Data types supported: Same as @p input.
-     *                              Output tensor dimensions match the inputs' ones from the fourth dimension and above,
-     *                              while width and height are the maximum width and height of the input tensors.
-     *                              Finally, depth is the sum of the input depths.
-     */
-    void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConcatenateLayer
-     *
-     * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
-     *                          Input dimensions might differ for each input for the first three dimensions (width, height, depth)
-     *                          and must match for the rest.
-     *                          Note that the difference between the minimum and maximum width and height among the input tensors
-     *                          must be divisible by 2 otherwise it is not clear how padding should be added on the inputs' width and
-     *                          height when they are less than the maximum input sizes.
-     * @param[in] output        Output tensor. Data types supported: Same as @p input.
-     *                          Output tensor dimensions match the inputs' ones from the fourth dimension and above,
-     *                          while width and height are the maximum width and height of the input tensors.
-     *                          Finally, depth is the sum of the input depths.
-     *
-     * @return a status
-     */
-    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run() override;
-
-private:
-    std::vector<ITensor *>                                      _inputs_vector;
-    std::vector<std::unique_ptr<NEDepthConcatenateLayerKernel>> _concat_kernels_vector;
-    std::vector<std::unique_ptr<NEFillBorderKernel>>            _border_handlers_vector;
-    unsigned int                                                _num_inputs;
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEDEPTHCONCATENATE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NELSTMLayer.h b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
index f3a1aa7..cf0f06c 100644
--- a/arm_compute/runtime/NEON/functions/NELSTMLayer.h
+++ b/arm_compute/runtime/NEON/functions/NELSTMLayer.h
@@ -32,9 +32,9 @@
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMM.h"
-#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
 #include "arm_compute/runtime/common/LSTMParams.h"
 
 namespace arm_compute
@@ -176,11 +176,11 @@
     NEActivationLayerKernel         _projection_clip;
     NECopyKernel                    _copy_cell_state;
     NECopyKernel                    _copy_output;
-    NEWidthConcatenateLayer         _concat_scratch_buffer;
-    NEWidthConcatenateLayer         _concat_inputs_forget_gate;
-    NEWidthConcatenateLayer         _concat_weights_forget_gate;
-    NEWidthConcatenateLayer         _concat_weights_input_gate;
-    NEWidthConcatenateLayer         _concat_weights_output;
+    NEConcatenateLayer              _concat_scratch_buffer;
+    NEConcatenateLayer              _concat_inputs_forget_gate;
+    NEConcatenateLayer              _concat_weights_forget_gate;
+    NEConcatenateLayer              _concat_weights_input_gate;
+    NEConcatenateLayer              _concat_weights_output;
     Tensor                          _input_gate_out1;
     Tensor                          _input_gate_out2;
     Tensor                          _input_gate_out3;
diff --git a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
deleted file mode 100644
index 8d22176..0000000
--- a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
-#define __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h"
-
-#include "arm_compute/core/utils/misc/Requires.h"
-
-#include <memory>
-#include <type_traits>
-#include <vector>
-
-namespace arm_compute
-{
-// Forward declarations
-class ITensor;
-
-/** Basic function to execute concatenate tensors along x axis. This function calls the following kernel:
- *
- * -# @ref NEWidthConcatenateLayerKernel
- *
- * @deprecated This function is deprecated and will be removed in release 19.08
- */
-class NEWidthConcatenateLayer : public IFunction
-{
-public:
-    /** Default constructor */
-    NEWidthConcatenateLayer();
-    /** Initialise the kernel's inputs vector and output.
-     *
-     * @param[in]  inputs_vector The vectors containing all the tensors to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
-     *                           Dimensions of all the inputs should match apart for the width which can differ.
-     * @param[out] output        Output tensor. Data types supported: Same as @p input.
-     *                           Output tensor dimensions are the same with the inputs from the second dimension and above.
-     *                           The first dimension (width) is the sum of the input tensors' widths.
-     */
-    void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
-    void configure(std::vector<const ITensor *> inputs_vector, ITensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEWidthConcatenateLayer
-     *
-     * @param[in] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
-     *                          Dimensions of all the inputs should match apart for the width which can differ.
-     * @param[in] output        Output tensor. Data types supported: Same as @p input.
-     *                          Output tensor dimensions are the same with the inputs from the second dimension and above.
-     *                          The first dimension (width) is the sum of the input tensors' widths.
-     *
-     * @return a status
-     */
-    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-    static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run() override;
-
-private:
-    std::vector<NEWidthConcatenateLayerKernel> _concat_kernels_vector;
-    unsigned int                               _num_inputs;
-    template <typename TensorType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorType>::type, ITensor>::value)>
-    void configure_internal(std::vector<TensorType *> &&inputs_vector, ITensor *output);
-    template <typename TensorInfoType, REQUIRES_TA(std::is_same<typename std::remove_cv<TensorInfoType>::type, ITensorInfo>::value)>
-    static Status validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output);
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__ */