COMPMID-1386: Add support for converting weights for CL.

Change-Id: I62e3ead903366baeeb1488f233a9b8b0c388c9de
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140403
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
index 77e9d15..9bfade4 100644
--- a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
@@ -39,7 +39,7 @@
      *
      * @param[in]  input                Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
      * @param[out] output               The converted weights tensor. Shape and Data Type: Same as @p input.
-     * @param[in]  original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+     * @param[in]  original_input_shape Shape of the original input tensor (the one entering fully connected layer).
      * @param[in]  data_layout          The data layout the weights have been trained in.
      */
     void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
@@ -47,7 +47,7 @@
      *
      * @param[in] input                Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
      * @param[in] output               The converted weights tensor info. Shape and Data Type: Same as @p input.
-     * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
+     * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer).
      * @param[in] data_layout          The data layout the weights have been trained in.
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output, const TensorShape &original_input_shape, DataLayout data_layout);
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 3357868..6b8d7a9 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -31,6 +31,7 @@
 #include "arm_compute/core/CL/kernels/CLTransposeKernel.h"
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h"
 #include "arm_compute/runtime/CL/functions/CLGEMM.h"
 #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
 #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
@@ -86,32 +87,26 @@
     CLFullyConnectedLayer &operator=(CLFullyConnectedLayer &&) = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input                   Source tensor. Data type supported: QASYMM8/F16/F32.
-     * @param[in]  weights                 Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
-     * @param[in]  biases                  Bias tensor. It can be nullptr. Data type supported:Same as @p input.
-     * @param[out] output                  Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  transpose_weights       (Optional) Transpose weights if true. Defaults to true.
-     * @param[in]  are_weights_reshaped    (Optional) Reshape the weights tensor if false. Defaults to false.
-     * @param[in]  retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
-     *                                     Used for reconfiguration purposes.
+     * @param[in]  input   Source tensor. Data type supported: QASYMM8/F16/F32.
+     * @param[in]  weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+     * @param[in]  biases  Bias tensor. It can be nullptr. Data type supported: Same as @p input.
+     * @param[out] output  Destination tensor. Data type supported: Same as @p input.
+     * @param[in]  fc_info (Optional) Fully connected layer additional info
      */
-    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false,
-                   bool retain_internal_weights = false);
+    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+                   FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
      *
-     * @param[in] input                   Source tensor. Data type supported: QASYMM8/F16/F32.
-     * @param[in] weights                 Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
-     * @param[in] biases                  Bias tensor. It can be nullptr. Data type supported:Same as @p input.
-     * @param[in] output                  Destination tensor. Data type supported: Same as @p input.
-     * @param[in] transpose_weights       (Optional) Transpose weights if true. Defaults to true.
-     * @param[in] are_weights_reshaped    (Optional) Reshape the weights tensor if false. Defaults to false.
-     * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
-     *                                    Used for reconfiguration purposes.
+     * @param[in] input   Source tensor. Data type supported: QASYMM8/F16/F32.
+     * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+     * @param[in] biases  Bias tensor. It can be nullptr. Data type supported: Same as @p input.
+     * @param[in] output  Destination tensor. Data type supported: Same as @p input.
+     * @param[in] fc_info (Optional) Fully connected layer additional info
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false,
-                           bool retain_internal_weights = false);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+                           FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
 
     //Inherited methods override
     void run() override;
@@ -124,6 +119,7 @@
 
     CLMemoryGroup                                       _memory_group;
     CLIm2ColKernel                                      _im2col_kernel;
+    CLConvertFullyConnectedWeights                      _convert_weights;
     CLFullyConnectedLayerReshapeWeights                 _reshape_weights_kernel;
     CLGEMM                                              _mm_gemm;
     CLGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
@@ -131,11 +127,14 @@
     CLGEMMMatrixAccumulateBiasesKernel                  _accumulate_biases_kernel;
     CLTensor                                            _im2col_output;
     CLTensor                                            _gemmlowp_output;
+    CLTensor                                            _converted_weights_output;
     CLTensor                                            _reshape_weights_output;
+    bool                                                _are_weights_converted;
     bool                                                _are_weights_reshaped;
     bool                                                _is_fc_after_conv;
     bool                                                _accumulate_biases;
     bool                                                _is_quantized;
+    bool                                                _is_prepared;
     const ICLTensor                                    *_original_weights;
 };
 }