COMPMID-1364: Add support for NHWC in NEDepthConcatenateLayer

Change-Id: I4f8e46d1c79afa9284f2c6dc00383c453a8e7bd5
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140165
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index eefb5fa..e2162ef 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -49,10 +49,34 @@
     NEDepthConcatenateLayer();
     /** Initialise the kernel's inputs vector and output.
      *
-     * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported:  F16/F32.
-     * @param[out]    output        Output tensor. Data types supported: Same as @p inputs_vector.
+     * @param[in,out] inputs_vector The vector containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
+     *                              Input dimensions may differ between inputs in the first three dimensions (width, height, depth)
+     *                              but must match in the remaining dimensions.
+     *                              Note that the difference between the minimum and maximum width and height among the input tensors
+     *                              must be divisible by 2; otherwise, it is not clear how padding should be added to the inputs' width and
+     *                              height when they are smaller than the maximum input sizes.
+     * @param[out]    output        Output tensor. Data types supported: Same as @p inputs_vector.
+     *                              Output tensor dimensions match the inputs' dimensions from the fourth dimension and above,
+     *                              while width and height are the maximum width and height of the input tensors.
+     *                              Finally, depth is the sum of the input depths.
      */
-    void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
+    void configure(const std::vector<ITensor *> &inputs_vector, ITensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConcatenateLayer
+     *
+     * @param[in] inputs_vector The vector containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
+     *                          Input dimensions may differ between inputs in the first three dimensions (width, height, depth)
+     *                          but must match in the remaining dimensions.
+     *                          Note that the difference between the minimum and maximum width and height among the input tensors
+     *                          must be divisible by 2; otherwise, it is not clear how padding should be added to the inputs' width and
+     *                          height when they are smaller than the maximum input sizes.
+     * @param[in] output        Output tensor. Data types supported: Same as @p inputs_vector.
+     *                          Output tensor dimensions match the inputs' dimensions from the fourth dimension and above,
+     *                          while width and height are the maximum width and height of the input tensors.
+     *                          Finally, depth is the sum of the input depths.
+     *
+     * @return a status
+     */
+    static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
 
     // Inherited methods overridden:
     void run() override;
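
For reference, below is a minimal usage sketch (not part of this patch) showing how the new configure()/validate() overloads could be driven with NHWC tensors. Tensor shapes, the F32 data type, and the surrounding setup are illustrative assumptions, and it presumes a library build with DataLayout/NHWC support; it is a sketch, not code from this change.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // For NHWC the TensorShape is laid out as (C, W, H): two inputs with the
        // same width/height (32x24) but different channel counts (16 and 8).
        TensorInfo info0(TensorShape(16U, 32U, 24U), 1, DataType::F32);
        TensorInfo info1(TensorShape(8U, 32U, 24U), 1, DataType::F32);
        // Output depth is the sum of the input depths: 16 + 8 = 24 channels.
        TensorInfo info_dst(TensorShape(24U, 32U, 24U), 1, DataType::F32);
        info0.set_data_layout(DataLayout::NHWC);
        info1.set_data_layout(DataLayout::NHWC);
        info_dst.set_data_layout(DataLayout::NHWC);

        Tensor src0{}, src1{}, dst{};
        src0.allocator()->init(info0);
        src1.allocator()->init(info1);
        dst.allocator()->init(info_dst);

        // Check the configuration before committing to it.
        const Status status = NEDepthConcatenateLayer::validate({ src0.info(), src1.info() }, dst.info());
        if(status.error_code() != ErrorCode::OK)
        {
            return 1;
        }

        NEDepthConcatenateLayer concat{};
        concat.configure({ &src0, &src1 }, &dst);

        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src0 and src1 with data ...

        concat.run();
        return 0;
    }

Since both inputs here share the same width and height, the divisible-by-2 constraint on the width/height differences described in the doxygen above is trivially satisfied; with mixed sizes, smaller inputs are padded evenly up to the maximum width and height.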