Optimize CL DeconvolutionLayer - Part II: Add CLDirectDeconvolutionLayer function to be used by CLDeconvolutionLayer.

This is only a code refactoring (no optimizations have been added)
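
For reference, a minimal usage sketch of the public API preserved by this refactoring (the shapes, strides and tensor names below are illustrative, not taken from this patch):

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"

    using namespace arm_compute;

    void example_deconvolution()
    {
        CLScheduler::get().default_init();

        // NCHW tensors: 4x4 input with 3 channels, 3x3 kernels producing 8 output channels.
        CLTensor src, weights, bias, dst;
        src.allocator()->init(TensorInfo(TensorShape(4U, 4U, 3U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
        // Output size per spatial dimension: (4 - 1) * 2 - 2 * 0 + 3 = 9
        dst.allocator()->init(TensorInfo(TensorShape(9U, 9U, 8U), 1, DataType::F32));

        CLDeconvolutionLayer deconv;
        deconv.configure(&src, &weights, &bias, &dst, PadStrideInfo(2, 2, 0, 0));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src, weights and bias here ...

        deconv.run();
        CLScheduler::get().sync();
    }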

Change-Id: I78488f4aecfe1cce93c31dba31489dcee4c85c67
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/895
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 46e43dc..f102184 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -61,6 +61,7 @@
 #include "arm_compute/runtime/CL/functions/CLDerivative.h"
 #include "arm_compute/runtime/CL/functions/CLDilate.h"
 #include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h"
 #include "arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h"
 #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLEqualizeHistogram.h"
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index 9c115f8..b613708 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -24,13 +24,7 @@
 #ifndef __ARM_COMPUTE_CLDECONVOLUTIONLAYER_H__
 #define __ARM_COMPUTE_CLDECONVOLUTIONLAYER_H__
 
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"
-
-#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
-
-#include "arm_compute/runtime/CL/CLMemoryGroup.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h"
 #include "arm_compute/runtime/IFunction.h"
 #include "arm_compute/runtime/IMemoryManager.h"
 
@@ -38,51 +32,16 @@
 
 namespace arm_compute
 {
-class ICLTensor;
-/** Function to run the deconvolution layer.
+/** Basic function to compute the deconvolution layer. This function calls the following OpenCL kernels/functions:
  *
- * Deconvolution Layer is the backward pass of Convolution Layer. First we transform the input depending on the stride and pad info and then perform a 1x1
- * convolution pass. Input stride defines how many zeroes we should put between each element of the input, pad is the amount of padding and finally a is a user
- * specified value where a < stride - 1, that increases the padding top and right of the input image.
- *
- *  The relation between input to output is as follows:
- *  \f[
- *       width\_output = (width\_input - 1) \cdot stride\_x - 2 \cdot padding\_x + kernel\_x
- *  \f]
- *  \f[
- *       height\_output = (height\_input - 1) \cdot stride\_y - 2 \cdot padding\_y + kernel\_y
- *  \f]
- *
- *  where:
- *      width_input is the size of the first input dimension.
- *      height_input is the size of the second input dimension.
- *      width_output is the size of the first output dimension.
- *      height_output is the size of the second output dimension.
- *      kernel_x and kernel_y are the convolution sizes in x and y.
- *      stride_x and stride_y is the input stride of the first and second dimension.
- *
- * The weights used by Deconvolution are supposed to be the same as the ones used for Convolution. Therefore, it will be necessary to use the weights in the
- * reverse order to perform an actual convolution. This is achieved by using the @ref CPPFlipWeightsKernel.
- *
- * This function calls the following OpenCL kernels/functions:
- *
- * -# @ref CLDeconvolutionLayerUpsample
- * -# @ref CLConvolutionLayer
- *
+ * -# @ref CLDirectDeconvolutionLayer
  */
 class CLDeconvolutionLayer : public IFunction
 {
 public:
-    /** Constructor */
+    /** Default constructor */
     CLDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLDeconvolutionLayer(const CLDeconvolutionLayer &) = delete;
-    /** Default move constructor */
-    CLDeconvolutionLayer(CLDeconvolutionLayer &&) = default;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLDeconvolutionLayer &operator=(const CLDeconvolutionLayer &) = delete;
-    /** Default move assignment operator */
-    CLDeconvolutionLayer &operator=(CLDeconvolutionLayer &&) = default;
+
     /** Set the input, weights, biases and output tensors.
      *
      * @deprecated This method is deprecated and will be removed in release 19.05
@@ -91,13 +50,13 @@
      * @param[in]     weights            The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in]     bias               (Optional) The biases have one dimension. Data type supported: Same as @p input.
      * @param[out]    output             Output tensor. The output has the same number of dimensions as the @p input.
-     * @param[in]     info               Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+     * @param[in]     deconv_info        Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
      * @param[in]     inner_border_right The number of zeros added to right edge of the input.
      * @param[in]     inner_border_top   The number of zeros added to top edge of the input.
      * @param[in]     weights_info       (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
      *
      */
-    void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
+    void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
                    unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
      *
@@ -107,14 +66,14 @@
      * @param[in] weights            The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in] bias               (Optional) The biases have one dimension. Data type supported: Same as @p input.
      * @param[in] output             Output tensor info. The output has the same number of dimensions as the @p input.
-     * @param[in] info               Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+     * @param[in] deconv_info        Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
      * @param[in] inner_border_right The number of zeros added to right edge of the input.
      * @param[in] inner_border_top   The number of zeros added to top edge of the input.
      * @param[in] weights_info       (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &info,
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
                            unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());
 
     /** Set the input, weights, biases and output tensors.
@@ -123,37 +82,32 @@
      * @param[in]     weights      The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in]     bias         (Optional) The biases have one dimension. Data type supported: Same as @p input.
      * @param[out]    output       Output tensor. The output has the same number of dimensions as the @p input.
-     * @param[in]     info         Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+     * @param[in]     deconv_info  Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
      * @param[in]     weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
      *
      */
-    void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info, const WeightsInfo &weights_info = WeightsInfo());
+    void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info, const WeightsInfo &weights_info = WeightsInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
      *
      * @param[in] input        Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
      * @param[in] weights      The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in] bias         (Optional) The biases have one dimension. Data type supported: Same as @p input.
      * @param[in] output       Output tensor info. The output has the same number of dimensions as the @p input.
-     * @param[in] info         Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+     * @param[in] deconv_info  Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
      * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &info, const WeightsInfo &weights_info = WeightsInfo());
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
+                           const WeightsInfo &weights_info = WeightsInfo());
 
     // Inherited methods overridden:
     void run() override;
     void prepare() override;
 
 private:
-    CLMemoryGroup                _memory_group;
-    CLDeconvolutionLayerUpsample _scale_f;
-    CLConvolutionLayer           _conv_f;
-    CPPFlipWeightsKernel         _flip_weights;
-    CLTensor                     _scaled_output;
-    ICLTensor                   *_original_weights;
-    CLTensor                     _weights_flipped;
-    bool                         _is_prepared;
+    std::shared_ptr<IMemoryManager> _memory_manager;
+    std::unique_ptr<IFunction>      _function;
 };
 }
 #endif /* __ARM_COMPUTE_CLDECONVOLUTIONLAYER_H__ */
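After this change CLDeconvolutionLayer keeps only a memory manager and an opaque IFunction; the matching source file is not part of this hunk, but the new members suggest a delegation roughly along these lines (a sketch under that assumption, not the patch's actual implementation):

    void CLDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output,
                                         const PadStrideInfo &deconv_info, const WeightsInfo &weights_info)
    {
        // Instantiate the direct deconvolution path and hand it the same memory manager.
        auto f = std::unique_ptr<CLDirectDeconvolutionLayer>(new CLDirectDeconvolutionLayer(_memory_manager));
        f->configure(input, weights, bias, output, deconv_info, weights_info);
        _function = std::move(f);
    }

    void CLDeconvolutionLayer::run()
    {
        prepare();
        _function->run();
    }

    void CLDeconvolutionLayer::prepare()
    {
        _function->prepare();
    }
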
diff --git a/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
new file mode 100644
index 0000000..936263d
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLDIRECTDECONVOLUTIONLAYER_H__
+#define __ARM_COMPUTE_CLDIRECTDECONVOLUTIONLAYER_H__
+
+#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"
+#include "arm_compute/runtime/CL/functions/CLTranspose.h"
+
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
+
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ICLTensor;
+/** Function to run the deconvolution layer.
+ *
+ * Deconvolution Layer is the backward pass of Convolution Layer. First we transform the input depending on the stride and pad info and then perform a 1x1
+ * convolution pass. Input stride defines how many zeroes we should put between each element of the input and pad is the amount of padding.
+ *
+ *  The relation between input to output is as follows:
+ *  \f[
+ *       width\_output = (width\_input - 1) \cdot stride\_x - 2 \cdot padding\_x + kernel\_x
+ *  \f]
+ *  \f[
+ *       height\_output = (height\_input - 1) \cdot stride\_y - 2 \cdot padding\_y + kernel\_y
+ *  \f]
+ *
+ *  where:
+ *      width_input is the size of the first input dimension.
+ *      height_input is the size of the second input dimension.
+ *      width_output is the size of the first output dimension.
+ *      height_output is the size of the second output dimension.
+ *      kernel_x and kernel_y are the convolution sizes in x and y.
+ *      stride_x and stride_y are the input strides of the first and second dimensions.
+ *
+ * The weights used by Deconvolution are supposed to be the same as the ones used for Convolution. Therefore, it will be necessary to use the weights in the
+ * reverse order to perform an actual convolution. This is achieved by using the @ref CPPFlipWeightsKernel.
+ *
+ * This function calls the following OpenCL kernels/functions:
+ *
+ * -# @ref CLDeconvolutionLayerUpsample
+ * -# @ref CLConvolutionLayer
+ *
+ * And the following CPP kernels:
+ * -# @ref CPPFlipWeightsKernel
+ *
+ */
+class CLDirectDeconvolutionLayer : public IFunction
+{
+public:
+    /** Constructor */
+    CLDirectDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDirectDeconvolutionLayer(const CLDirectDeconvolutionLayer &) = delete;
+    /** Default move constructor */
+    CLDirectDeconvolutionLayer(CLDirectDeconvolutionLayer &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDirectDeconvolutionLayer &operator=(const CLDirectDeconvolutionLayer &) = delete;
+    /** Default move assignment operator */
+    CLDirectDeconvolutionLayer &operator=(CLDirectDeconvolutionLayer &&) = default;
+    /** Set the input, weights, biases and output tensors.
+     *
+     * @param[in,out] input        Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
+     * @param[in]     weights      The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
+     * @param[in]     bias         (Optional) The biases have one dimension. Data type supported: Same as @p input.
+     * @param[out]    output       Output tensor. The output has the same number of dimensions as the @p input.
+     * @param[in]     info         Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+     * @param[in]     weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+     *
+     */
+    void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info, const WeightsInfo &weights_info = WeightsInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref CLDirectDeconvolutionLayer
+     *
+     * @param[in] input        Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
+     * @param[in] weights      The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
+     * @param[in] bias         (Optional) The biases have one dimension. Data type supported: Same as @p input.
+     * @param[in] output       Output tensor info. The output has the same number of dimensions as the @p input.
+     * @param[in] info         Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+     * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &info,
+                           const WeightsInfo &weights_info = WeightsInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+    void prepare() override;
+
+private:
+    CLMemoryGroup                _memory_group;
+    CLDeconvolutionLayerUpsample _scale_f;
+    CLConvolutionLayer           _conv_f;
+    CPPFlipWeightsKernel         _flip_weights;
+
+    CLTensor   _scaled_output;
+    ICLTensor *_original_weights;
+    CLTensor   _weights_flipped;
+
+    bool _is_prepared;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLDIRECTDECONVOLUTIONLAYER_H__ */
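
As a quick numerical check of the output-size relation documented in the comment above (a standalone sketch, not part of the library):

    #include <cassert>

    // Output extent from the documented relation:
    //   out = (in - 1) * stride - 2 * pad + kernel
    static int deconv_out_dim(int in, int stride, int pad, int kernel)
    {
        return (in - 1) * stride - 2 * pad + kernel;
    }

    int main()
    {
        assert(deconv_out_dim(4, 2, 0, 3) == 9); // 4x4 input, stride 2, no padding, 3x3 kernel -> 9x9 output
        assert(deconv_out_dim(7, 1, 1, 3) == 7); // stride 1, pad 1, 3x3 kernel preserves the spatial size
        return 0;
    }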