COMPMID-1364: Add support for NHWC in NEDepthConcatenateLayer

Change-Id: I4f8e46d1c79afa9284f2c6dc00383c453a8e7bd5
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140165
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
new file mode 100644
index 0000000..2cdc720
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NECONCATENATELAYER_H__
+#define __ARM_COMPUTE_NECONCATENATELAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/Types.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+class ITensorInfo;
+class Status;
+
+/** Basic function to concatenate tensors along a given axis. This function calls the following functions:
+ *
+ * -# @ref NEWidthConcatenateLayer (if underlying concatenation axis is 0).
+ * -# @ref NEDepthConcatenateLayer (if underlying concatenation axis is 2).
+ */
+class NEConcatenateLayer : public IFunction
+{
+public:
+    /** Default constructor */
+    NEConcatenateLayer();
+    /** Initialise the kernel's inputs vector and output.
+     *
+     * @note Preconditions on the input and output tensor dimensions differ depending on the concatenation axis.
+     * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer and @ref NEDepthConcatenateLayer.
+     *
+     * @param[in,out] inputs_vector The vector containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
+     * @param[out]    output        Output tensor. Data types supported: Same as @p inputs_vector.
+     * @param[in]     axis          Concatenation axis. Supported underlying concatenation axes are 0 and 2.
+     */
+    void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEConcatenateLayer
+     *
+     * @note Preconditions on the input and output tensor dimensions differ depending on the concatenation axis.
+     * @note Preconditions can be found respectively at @ref NEWidthConcatenateLayer and @ref NEDepthConcatenateLayer.
+     *
+     * @param[in] inputs_vector The vector containing all the tensor infos to concatenate. Data types supported: QASYMM8/F16/F32.
+     * @param[in] output        Output tensor info. Data types supported: Same as @p inputs_vector.
+     * @param[in] axis          Concatenation axis. Supported underlying concatenation axes are 0 and 2.
+     *
+     * @return a status
+     */
+    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    std::unique_ptr<IFunction> _concat_function;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NECONCATENATELAYER_H__ */
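
A usage sketch for the new NEConcatenateLayer (illustrative only, not part of this patch), assuming hypothetical, already configured and allocated tensors src0, src1 and dst; the axis is given as a DataLayoutDimension and the function dispatches to NEWidthConcatenateLayer or NEDepthConcatenateLayer underneath:

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

    using namespace arm_compute;

    // src0, src1 and dst are assumed to satisfy the preconditions of the
    // selected sub-function (see the notes in the header above).
    void concat_along_channels(ITensor *src0, ITensor *src1, ITensor *dst)
    {
        // Optional: check the configuration up front instead of asserting later.
        const Status status = NEConcatenateLayer::validate({ src0->info(), src1->info() }, dst->info(), DataLayoutDimension::CHANNEL);
        ARM_COMPUTE_ERROR_THROW_ON(status);

        NEConcatenateLayer concat;
        concat.configure({ src0, src1 }, dst, DataLayoutDimension::CHANNEL);
        concat.run();
    }
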
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index eefb5fa..e2162ef 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -49,10 +49,34 @@
     NEDepthConcatenateLayer();
     /** Initialise the kernel's inputs vector and output.
      *
-     * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported:  F16/F32.
-     * @param[out]    output        Output tensor. Data types supported: Same as @p inputs_vector.
+     * @param[in,out] inputs_vector The vector containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
+     *                              The first three dimensions (width, height, depth) may differ between inputs; all remaining
+     *                              dimensions must match.
+     *                              Note that the difference between the minimum and maximum width and height among the input tensors
+     *                              must be divisible by 2, otherwise it is not clear how padding should be added to the inputs' width
+     *                              and height when they are smaller than the maximum input sizes.
+     * @param[out]    output        Output tensor. Data types supported: Same as @p inputs_vector.
+     *                              Output tensor dimensions match the inputs' ones from the fourth dimension and above,
+     *                              while width and height are the maximum width and height of the input tensors.
+     *                              Finally, depth is the sum of the input depths.
      */
-    void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
+    void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConcatenateLayer
+     *
+     * @param[in] inputs_vector The vector containing all the tensor infos to concatenate. Data types supported: QASYMM8/F16/F32.
+     *                          The first three dimensions (width, height, depth) may differ between inputs; all remaining
+     *                          dimensions must match.
+     *                          Note that the difference between the minimum and maximum width and height among the input tensors
+     *                          must be divisible by 2, otherwise it is not clear how padding should be added to the inputs' width
+     *                          and height when they are smaller than the maximum input sizes.
+     * @param[in] output        Output tensor info. Data types supported: Same as @p inputs_vector.
+     *                          Output tensor dimensions match the inputs' ones from the fourth dimension and above,
+     *                          while width and height are the maximum width and height of the input tensors.
+     *                          Finally, depth is the sum of the input depths.
+     *
+     * @return a status
+     */
+    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
 
     // Inherited methods overridden:
     void run() override;
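
A shape illustration for the rules documented above (illustrative only, not part of this patch): depth-concatenating a 14x14x64 input with a 12x12x32 input produces a 14x14x96 output, since width and height take the maximum across inputs, depth is the sum, and the width/height differences (2 and 2) are even, so the smaller input can be padded evenly on both sides. A minimal sketch of the new validate()/configure() interface, assuming hypothetical pre-allocated tensors:

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"

    using namespace arm_compute;

    // in0: 14x14x64, in1: 12x12x32, out: 14x14x96 (WxHxC), set up elsewhere.
    Status depth_concat(ITensor *in0, ITensor *in1, ITensor *out)
    {
        // validate() reports precondition violations as a Status instead of asserting.
        const Status status = NEDepthConcatenateLayer::validate({ in0->info(), in1->info() }, out->info());
        if(status.error_code() != ErrorCode::OK)
        {
            return status;
        }

        NEDepthConcatenateLayer concat;
        concat.configure({ in0, in1 }, out);
        concat.run();
        return status;
    }
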
diff --git a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
new file mode 100644
index 0000000..e68525f
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
+#define __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+
+/** Basic function to concatenate tensors along the X axis. This function calls the following kernel:
+ *
+ * -# @ref NEWidthConcatenateLayerKernel
+ */
+class NEWidthConcatenateLayer : public IFunction
+{
+public:
+    /** Default constructor */
+    NEWidthConcatenateLayer();
+    /** Initialise the kernel's inputs vector and output.
+     *
+     * @param[in]  inputs_vector The vector containing all the tensors to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     *                           Dimensions of all the inputs must match, apart from the width, which can differ.
+     * @param[out] output        Output tensor. Data types supported: Same as @p inputs_vector.
+     *                           Output tensor dimensions are the same as the inputs' from the second dimension and above.
+     *                           The first dimension (width) is the sum of the input tensors' widths.
+     */
+    void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEWidthConcatenateLayer
+     *
+     * @param[in] inputs_vector The vector containing all the tensor infos to concatenate. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     *                          Dimensions of all the inputs must match, apart from the width, which can differ.
+     * @param[in] output        Output tensor info. Data types supported: Same as @p inputs_vector.
+     *                          Output tensor dimensions are the same as the inputs' from the second dimension and above.
+     *                          The first dimension (width) is the sum of the input tensors' widths.
+     *
+     * @return a status
+     */
+    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    std::unique_ptr<NEWidthConcatenateLayerKernel[]> _concat_kernels_vector;
+    unsigned int                                     _num_inputs;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEWIDTHCONCATENATELAYER_H__ */
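
A minimal width-concatenation sketch (illustrative only, not part of this patch), assuming hypothetical tensors that match on every dimension except the width, for example 8x4 and 24x4 inputs concatenated into a 32x4 output, since the output width is the sum of the input widths:

    #include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"

    using namespace arm_compute;

    // in0 and in1 differ only in width; out's width is the sum of the two widths.
    void width_concat(ITensor *in0, ITensor *in1, ITensor *out)
    {
        NEWidthConcatenateLayer concat;
        concat.configure({ in0, in1 }, out); // here inputs_vector is taken by value
        concat.run();
    }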