COMPMID-1047 Extract Flatten function from Im2Col for NEON

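Move the NEON flatten path out of NEIm2ColKernel and into a dedicated
NEFlattenLayerKernel: NEFlattenLayer gains a static validate(),
NEFullyConnectedLayer replaces its NEIm2ColKernel member and
_im2col_output tensor with the flatten equivalents, and the
is_fully_connected / is_flatten flags are dropped from the NEIm2Col
interface. A stray @ref CLTranspose in the CLFlattenLayer docs is also
corrected.
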
Change-Id: I80f3aaadc8cae8c9ca1a5a239e79bda302b89bd8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144813
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
diff --git a/arm_compute/runtime/CL/functions/CLFlattenLayer.h b/arm_compute/runtime/CL/functions/CLFlattenLayer.h
index ebc0e5e..24ed56f 100644
--- a/arm_compute/runtime/CL/functions/CLFlattenLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFlattenLayer.h
@@ -47,7 +47,7 @@
      *                    w = width of the input tensor, h = height of the input tensor and d = depth of the input tensor. Data type supported: same as @p input
      */
     void configure(const ICLTensor *input, ICLTensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLTranspose
+    /** Static function to check if given info will lead to a valid configuration of @ref CLFlattenLayer
      *
      * @param[in]  input  First input tensor to flatten with at least 3 dimensions.
      *                    The dimensions above the third will be interpreted as batches. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
diff --git a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
index 2c259fa..26d7c7f 100644
--- a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
@@ -31,11 +31,7 @@
 {
 class ITensor;
 
-/** Basic function to execute flatten. This function calls the following NEON kernel:
-*
-* -# @ref NEIm2ColKernel
-*
-*/
+/** Basic function to execute the flatten layer kernel. */
 class NEFlattenLayer : public INESimpleFunction
 {
 public:
@@ -46,6 +42,17 @@
      *             w = width of the input tensor, h = height of the input tensor and d = depth of the input tensor. Data type supported: same as @p input
      */
     void configure(const ITensor *input, ITensor *output);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref NEFlattenLayer
+     *
+     * @param[in]  input  First input tensor to flatten with at least 3 dimensions.
+     *                    The dimensions above the third will be interpreted as batches. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[out] output Output tensor with shape [w*h*d, input_batches] where:
+     *                    w = width of the input tensor, h = height of the input tensor and d = depth of the input tensor. Data type supported: same as @p input
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
 };
 } // namespace arm_compute
 
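For context, a minimal sketch of driving the new API: validate() first, then
configure() and run(). The function name, tensor shapes and allocation flow
below are illustrative and not part of the patch.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEFlattenLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void flatten_sketch() // illustrative name
    {
        Tensor src{};
        Tensor dst{};

        // 3D input [w, h, d]; any dimensions above the third act as batches.
        src.allocator()->init(TensorInfo(TensorShape(7U, 5U, 3U), 1, DataType::F32));
        // Flattened output [w * h * d] (times input_batches when batched).
        dst.allocator()->init(TensorInfo(TensorShape(7U * 5U * 3U), 1, DataType::F32));

        // New in this patch: check the configuration up front.
        ARM_COMPUTE_ERROR_THROW_ON(NEFlattenLayer::validate(src.info(), dst.info()));

        NEFlattenLayer flatten;
        flatten.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();
        flatten.run();
    }
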
diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index fe0f2f0..9c9074c 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -26,8 +26,8 @@
 
 #include "arm_compute/runtime/IFunction.h"
 
+#include "arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h"
-#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h"
 #include "arm_compute/core/NEON/kernels/NETransposeKernel.h"
 #include "arm_compute/runtime/MemoryGroup.h"
 #include "arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h"
@@ -129,14 +129,14 @@
     void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output);
 
     MemoryGroup                                         _memory_group;
-    NEIm2ColKernel                                      _im2col_kernel;
+    NEFlattenLayerKernel                                _flatten_kernel;
     NEConvertFullyConnectedWeights                      _convert_weights;
     NEFullyConnectedLayerReshapeWeights                 _reshape_weights_function;
     NEGEMM                                              _mm_gemm;
     NEGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
     NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
     NEGEMMMatrixAccumulateBiasesKernel                  _accumulate_biases_kernel;
-    Tensor                                              _im2col_output;
+    Tensor                                              _flatten_output;
     Tensor                                              _gemmlowp_output;
     Tensor                                              _converted_weights_output;
     Tensor                                              _reshape_weights_output;
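The header change above is a pure rename of the im2col members. As a
hypothetical sketch (not taken from this patch) of how the renamed pair might
be wired inside NEFullyConnectedLayer::configure(), assuming
NEFlattenLayerKernel exposes the same simple configure(input, output) shape as
the function documented earlier; shape_flatten is an illustrative placeholder:

    // Hypothetical wiring; shape_flatten stands in for the [w*h*d, batches]
    // shape computed from input->info().
    _memory_group.manage(&_flatten_output);
    _flatten_output.allocator()->init(
        TensorInfo(shape_flatten, 1, input->info()->data_type()));
    _flatten_kernel.configure(input, &_flatten_output);
    // The GEMM stage then consumes _flatten_output in place of the old
    // _im2col_output tensor.
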
diff --git a/arm_compute/runtime/NEON/functions/NEIm2Col.h b/arm_compute/runtime/NEON/functions/NEIm2Col.h
index 9df4f07..de4780f 100644
--- a/arm_compute/runtime/NEON/functions/NEIm2Col.h
+++ b/arm_compute/runtime/NEON/functions/NEIm2Col.h
@@ -42,38 +42,34 @@
     NEIm2Col();
     /** Configure the im2col NEON kernel
      *
-     * @param[in]  input              The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
-     *                                while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
-     *                                Note: QASYMM8 works only for has_bias = false
-     * @param[out] output             The output tensor. Data types supported: Same as @p input
-     * @param[in]  kernel_dims        The kernel dimensions (width and height).
-     * @param[in]  conv_info          Contains padding and stride information described in @ref PadStrideInfo.
-     * @param[in]  has_bias           In case biases are provided expands the matrix with 1.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in]  num_groups         (Optional) Number of groups when performing a grouped convolution
-     * @param[in]  is_fully_connected (Optional) Determines whether this function will be called by @ref NEFullyConnectedLayer in order to validate the arguments
-     * @param[in]  is_flatten         (Optional) Determines whether this function will be called by @ref NEFlattenLayer in order to validate the arguments
+     * @param[in]  input       The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
+     *                         while every optional dimension from 4 and above represents a batch of inputs. Data types supported: QASYMM8/F16/F32
+     *                         Note: QASYMM8 works only for has_bias = false
+     * @param[out] output      The output tensor. Data types supported: Same as @p input
+     * @param[in]  kernel_dims The kernel dimensions (width and height).
+     * @param[in]  conv_info   Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in]  has_bias    If biases are provided, expands the matrix with 1.
+     * @param[in]  dilation    (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+     * @param[in]  num_groups  (Optional) Number of groups when performing a grouped convolution
      */
     void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation = Size2D(1U, 1U),
-                   unsigned int num_groups = 1, bool is_fully_connected = false, bool is_flatten = false);
+                   unsigned int num_groups = 1);
     /** Static function to check if given info will lead to a valid configuration of @ref NEIm2Col
      *
-     * @param[in] input              The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
-     *                               while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
-     *                               Note: QASYMM8 works only for has_bias = false
-     * @param[in] output             The output tensor. Data types supported: Same as @p input
-     * @param[in] kernel_dims        The kernel dimensions (width and height).
-     * @param[in] conv_info          Contains padding and stride information described in @ref PadStrideInfo.
-     * @param[in] has_bias           In case biases are provided expands the matrix with 1.
-     * @param[in] dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in] num_groups         (Optional) Number of groups when performing a grouped convolution
-     * @param[in] is_fully_connected (Optional) Determines whether this function will be called by @ref NEFullyConnectedLayer in order to validate the arguments
-     * @param[in] is_flatten         (Optional) Determines whether this function will be called by @ref NEFlattenLayer in order to validate the arguments
+     * @param[in] input       The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
+     *                        while every optional dimension from 4 and above represents a batch of inputs. Data types supported: QASYMM8/F16/F32
+     *                        Note: QASYMM8 works only for has_bias = false
+     * @param[in] output      The output tensor. Data types supported: Same as @p input
+     * @param[in] kernel_dims The kernel dimensions (width and height).
+     * @param[in] conv_info   Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in] has_bias    If biases are provided, expands the matrix with 1.
+     * @param[in] dilation    (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+     * @param[in] num_groups  (Optional) Number of groups when performing a grouped convolution
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation = Size2D(1U, 1U),
-                           unsigned int num_groups = 1, bool is_fully_connected = false, bool is_flatten = false);
+                           unsigned int num_groups = 1);
 
     // Inherited methods overridden:
     void run() override;
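
Finally, existing NEIm2Col call sites simply drop the two trailing booleans. A
sketch assuming a 3x3 kernel, unit stride and one pixel of padding, with src
and dst already initialised by the caller (all values illustrative):

    #include "arm_compute/runtime/NEON/functions/NEIm2Col.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void im2col_sketch(Tensor &src, Tensor &dst) // illustrative name
    {
        NEIm2Col im2col;
        // Old call (removed by this patch): trailing is_fully_connected /
        // is_flatten flags selected special-cased behaviour.
        // im2col.configure(&src, &dst, Size2D(3U, 3U), PadStrideInfo(1, 1, 1, 1),
        //                  true, Size2D(1U, 1U), 1, false, false);

        // New call: the defaults cover dilation and num_groups, and the
        // flatten path now lives in NEFlattenLayer instead.
        im2col.configure(&src, &dst, Size2D(3U, 3U), PadStrideInfo(1, 1, 1, 1),
                         /* has_bias */ true);
        im2col.run();
    }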