Port ClFullyConnected to new API

Resolves: COMPMID-4391

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Idcd5e22ed6e901c7f4c7530e5547ea6a7814ae59
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6025
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
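
Reviewer note (not part of the patch): the public header now keeps only an opaque Impl behind a unique_ptr, with all kernel and intermediate-tensor state moved into the operator layer. Below is a minimal sketch of the forwarding pattern this implies. The operator name (opencl::ClFullyConnected), the pack contents and the Impl members are assumptions based on other functions already ported to the operator API, not the exact contents of the new .cpp:

    // Sketch only: assumed shape of the pimpl forwarding in the ported source file.
    struct CLFullyConnectedLayer::Impl
    {
        MemoryGroup                               memory_group{};
        IWeightsManager                          *weights_manager{ nullptr };
        std::unique_ptr<opencl::ClFullyConnected> op{ nullptr };  // operator that owns the kernels
        ITensorPack                               run_pack{};     // tensors handed to the operator at run time
        bool                                      is_prepared{ false };
    };

    void CLFullyConnectedLayer::run()
    {
        prepare();                                                // reshape/convert weights once, on first run
        MemoryGroupResourceScope scope_mg(_impl->memory_group);   // acquire workspace memory for this run
        _impl->op->run(_impl->run_pack);
    }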
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 82d1621..9235a85 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -24,76 +24,14 @@
 #ifndef ARM_COMPUTE_CLFULLYCONNECTEDLAYER_H
 #define ARM_COMPUTE_CLFULLYCONNECTEDLAYER_H
 
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+#include "arm_compute/runtime/IFunction.h"
 
 #include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h"
-#include "arm_compute/runtime/CL/functions/CLFlattenLayer.h"
-#include "arm_compute/runtime/CL/functions/CLGEMM.h"
-#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
-#include "arm_compute/runtime/CL/functions/CLTranspose.h"
 #include "arm_compute/runtime/IWeightsManager.h"
 #include "arm_compute/runtime/MemoryGroup.h"
 
 namespace arm_compute
 {
-namespace weights_transformations
-{
-/** Basic function to manage the reshape weights generated from @ref CLTranspose */
-class CLFullyConnectedLayerReshapeWeightsManaged : public ITransformWeights
-{
-public:
-    //Inherited method override
-    void run() override
-    {
-        _output.allocator()->allocate();
-        _func.run();
-        _reshape_run = true;
-    }
-
-    //Inherited method override
-    void release() override
-    {
-        _output.allocator()->free();
-    }
-
-    //Inherited method override
-    ICLTensor *get_weights() override
-    {
-        return &_output;
-    }
-
-    //Inherited method override
-    uint32_t uid() override
-    {
-        return _uid;
-    }
-
-    /** Configures the @ref CLTranspose function
-     *
-     * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     */
-    void configure(const ICLTensor *input)
-    {
-        configure(CLKernelLibrary::get().get_compile_context(), input);
-    }
-    /** Configures the @ref CLTranspose function
-     *
-     * @param[in] compile_context The compile context to be used.
-     * @param[in] input           Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     */
-    void configure(const CLCompileContext &compile_context, const ICLTensor *input)
-    {
-        _func.configure(compile_context, input, &_output);
-    }
-
-private:
-    static constexpr uint32_t _uid = 0x0;
-    CLTensor                  _output{};
-    CLTranspose               _func{};
-};
-} // namespace weights_transformations
-
 /** Basic function to compute a Fully Connected layer on OpenCL. This function calls the following OpenCL kernels:
  *
  *  -# @ref opencl::kernels::ClIm2ColKernel (called when the input comes from a convolutional layer)
@@ -107,6 +45,8 @@
 public:
     /** Constructor */
     CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
+    /** Default destructor */
+    ~CLFullyConnectedLayer();
     /** Prevent instances of this class from being copied (As this class contains pointers) */
     CLFullyConnectedLayer(const CLFullyConnectedLayer &) = delete;
     /** Default move constructor */
@@ -129,22 +69,6 @@
      * |QASYMM8        |QASYMM8            |S32    |QASYMM8        |
      * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32    |QASYMM8_SIGNED |
      *
-     * @param[in]  input   Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     * @param[in]  weights Weights tensor. The weights must be 2 dimensional.
-     *                     If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
-     *                     If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
-     *                     Data type supported: Same as @p input.
-     * @param[in]  biases  Bias tensor. Can be nullptr. Data type supported:Same as @p input.
-     * @param[out] output  Destination tensor. Its shape should be equal to the output of a matrix multiplication between:
-     *                     - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
-     *                     - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
-     *                     Data type supported: Same as @p input.
-     * @param[in]  fc_info (Optional) Fully connected layer additional info
-     */
-    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
-                   FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
-    /** Set the input and output tensors.
-     *
      * @param[in]  compile_context The compile context to be used.
      * @param[in]  input           Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[in]  weights         Weights tensor. The weights must be 2 dimensional.
@@ -160,19 +84,15 @@
      */
     void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                    FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
+    /** Set the input and output tensors.
+     *
+     * Similar to @ref CLFullyConnectedLayer::configure() above, but using the default compile context
+     */
+    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+                   FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
      *
-     * @param[in]  input   Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     * @param[in]  weights Weights tensor info. The weights must be 2 dimensional.
-     *                     If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
-     *                     If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
-     *                     Data type supported: Same as @p input.
-     * @param[in]  biases  Bias tensor info. Can be nullptr. Data type supported:Same as @p input.
-     * @param[out] output  Destination tensor info. Its shape should be equal to the output of a matrix multiplication between:
-     *                     - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
-     *                     - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
-     *                     Data type supported: Same as @p input.
-     * @param[in]  fc_info (Optional) Fully connected layer additional info
+     * Similar to @ref CLFullyConnectedLayer::configure()
      *
      * @return a status
      */
@@ -184,28 +104,8 @@
     void prepare() override;
 
 private:
-    void configure_fc_fc(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const FullyConnectedLayerInfo &fc_info);
-    void configure_conv_fc(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const FullyConnectedLayerInfo &fc_info);
-    void configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const FullyConnectedLayerInfo &fc_info);
-
-    MemoryGroup                                                         _memory_group;
-    IWeightsManager                                                    *_weights_manager;
-    CLConvertFullyConnectedWeights                                      _convert_weights;
-    weights_transformations::CLConvertFullyConnectedWeightsManaged      _convert_weights_managed;
-    weights_transformations::CLFullyConnectedLayerReshapeWeightsManaged _reshape_weights_managed_function;
-    CLFlattenLayer                                                      _flatten_layer;
-    CLTranspose                                                         _reshape_weights_function;
-    CLGEMM                                                              _mm_gemm;
-    CLGEMMLowpMatrixMultiplyCore                                        _mm_gemmlowp;
-    CLTensor                                                            _flatten_output;
-    CLTensor                                                            _converted_weights_output;
-    CLTensor                                                            _reshape_weights_output;
-    bool                                                                _are_weights_converted;
-    bool                                                                _are_weights_reshaped;
-    bool                                                                _is_fc_after_conv;
-    bool                                                                _is_quantized;
-    bool                                                                _is_prepared;
-    const ICLTensor                                                    *_original_weights;
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CLFULLYCONNECTEDLAYER_H */
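
The public run-time interface is unchanged by this port. For context, a minimal usage sketch of the function through that interface; shapes are illustrative (128 inputs, 64 outputs, batch of 1) and the default FullyConnectedLayerInfo is assumed:

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init(); // create the CL context/queue used by the library

        // 128 inputs -> 64 outputs, batch of 1, FP32 (shapes illustrative)
        CLTensor src, weights, bias, dst;
        src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));

        // Validate the configuration before configuring the function
        Status st = CLFullyConnectedLayer::validate(src.info(), weights.info(), bias.info(), dst.info());
        if(!bool(st))
        {
            return 1; // st.error_description() carries the reason
        }

        CLFullyConnectedLayer fc;
        fc.configure(&src, &weights, &bias, &dst);

        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src/weights/bias, e.g. via map()/unmap() ...

        fc.run();                  // first run also triggers prepare()
        CLScheduler::get().sync(); // wait for the CL queue to drain
        return 0;
    }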