COMPMID-1635: Optimize CLDeconvolutionLayer - Part III

Change-Id: Id2661e093a669ef3eaf2a5116cd278a80c1d5a89
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/935
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
Comments-Addressed: Isabella Gottardi <isabella.gottardi@arm.com>
Tested-by: Isabella Gottardi <isabella.gottardi@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/core/CL/CLKernelLibrary.h b/arm_compute/core/CL/CLKernelLibrary.h
index c1999b4..741e47c 100644
--- a/arm_compute/core/CL/CLKernelLibrary.h
+++ b/arm_compute/core/CL/CLKernelLibrary.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -333,7 +333,7 @@
      * @param[in] built_program_name Name of the program
      * @param[in] program            Built program to add to the cache
      */
-    void add_built_program(const std::string &built_program_name, cl::Program program);
+    void add_built_program(const std::string &built_program_name, const cl::Program &program);
 
     /** Returns true if FP16 is supported by the CL device
      *
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index 5749871..3f5a7dc 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -48,6 +48,7 @@
 #include "arm_compute/core/CL/kernels/CLCopyKernel.h"
 #include "arm_compute/core/CL/kernels/CLCropKernel.h"
 #include "arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
+#include "arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
 #include "arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
new file mode 100644
index 0000000..4b6284c
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLDECONVOLUTIONLAYERRESHAPEOUTPUTKERNEL_H__
+#define __ARM_COMPUTE_CLDECONVOLUTIONLAYERRESHAPEOUTPUTKERNEL_H__
+
+#include "arm_compute/core/CL/ICLSimpleKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the OpenCL kernel to be used for reshaping the tensor before returning the result of deconvolution.
+ *
+ * The input tensor to this OpenCL kernel is expected to be the result of a @ref CLGEMM operation between the Deconvolution input and the Deconvolution filter.
+ *
+ * The input tensor should have the following shape: [filter_width * filter_height * ofms, width, height, batch_size]
+ *
+ * The output tensor should have the following shape: [stride_x * (input_width - 1) + filter_width - 2 * padx, stride_y * (input_height - 1) + filter_height - 2 * pady, ofms, batch_size]
+ *
+ * For example, given a tensor with dimensions [4, 2, 2], this kernel returns a tensor with dimensions [1, 4, 4].
+ *
+ */
+class CLDeconvolutionReshapeOutputKernel : public ICLSimpleKernel
+{
+public:
+    /** Default constructor */
+    CLDeconvolutionReshapeOutputKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDeconvolutionReshapeOutputKernel(const CLDeconvolutionReshapeOutputKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDeconvolutionReshapeOutputKernel &operator=(const CLDeconvolutionReshapeOutputKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLDeconvolutionReshapeOutputKernel(CLDeconvolutionReshapeOutputKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLDeconvolutionReshapeOutputKernel &operator=(CLDeconvolutionReshapeOutputKernel &&) = default;
+    /** Default destructor */
+    ~CLDeconvolutionReshapeOutputKernel() = default;
+
+    /** Initialise the kernel's source and destination.
+     *
+     * @param[in]  input        Input tensor. Supported data types: F16/F32.
+     * @param[in]  bias         Bias tensor to be added directly during the reshape operation. Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[out] output       Output tensor with the following shape: [stride_x * (input_width - 1) + filter_width - 2 * padx, stride_y * (input_height - 1) + filter_height - 2 * pady, ofms, batch_size]
+     *                          Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[in]  input_info   Deconvolution input tensor info. Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[in]  weights_info Deconvolution weights tensor info. Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[in]  deconv_info  Contains padding and policies to be used in the deconvolution; this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width and stride_y = weights.height. Moreover, padding is not supported.
+     */
+    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionReshapeOutputKernel.
+     *
+     * @param[in] input        GEMM output tensor info to be reshaped. Supported data types: F16/F32.
+     * @param[in] bias         (Optional) Bias tensor info to be added directly during the reshape operation. Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[in] output       Reshaped output tensor info. Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[in] input_info   Original input tensor info. Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[in] weights_info Original weights tensor info. Supported data types: same as @p input.  Supported data layouts: same as @p input.
+     * @param[in] deconv_info  Contains padding and policies to be used in the deconvolution; this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width and stride_y = weights.height. Moreover, padding is not supported.
+     *
+     * @return a Status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    bool             _add_bias;
+    const ICLTensor *_bias;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLDECONVOLUTIONLAYERRESHAPEOUTPUTKERNEL_H__ */
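
The output shape documented on this kernel follows out = stride * (in - 1) + kernel - 2 * pad in each spatial dimension. Below is a minimal standalone sketch of that calculation; the struct and function names (DeconvOutputShape, deconv_output_shape) are illustrative only and are not part of this patch or of the library API.

    #include <cstddef>
    #include <cstdio>

    // Spatial output size of a deconvolution, as documented on
    // CLDeconvolutionReshapeOutputKernel:
    //   out = stride * (in - 1) + kernel - 2 * pad
    struct DeconvOutputShape
    {
        std::size_t width;
        std::size_t height;
    };

    DeconvOutputShape deconv_output_shape(std::size_t in_w, std::size_t in_h,
                                          std::size_t kernel_w, std::size_t kernel_h,
                                          std::size_t stride_x, std::size_t stride_y,
                                          std::size_t pad_x, std::size_t pad_y)
    {
        DeconvOutputShape out{};
        out.width  = stride_x * (in_w - 1) + kernel_w - 2 * pad_x;
        out.height = stride_y * (in_h - 1) + kernel_h - 2 * pad_y;
        return out;
    }

    int main()
    {
        // The example quoted in the header: 2x2 input, 2x2 filter, stride equal
        // to the filter size (the only case this kernel supports), no padding.
        const DeconvOutputShape out = deconv_output_shape(2, 2, 2, 2, 2, 2, 0, 0);
        std::printf("%zu x %zu\n", out.width, out.height); // prints: 4 x 4
        return 0;
    }

This reproduces the 4x4 spatial extent of the [4, 2, 2] -> [1, 4, 4] example in the header above.
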
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 24e91bd..a2dfbb7 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -134,6 +134,13 @@
     FFT       /**< Convolution using FFT */
 };
 
+/** Available DeconvolutionMethod */
+enum class DeconvolutionMethod
+{
+    GEMM,   /**< Deconvolution using GEMM */
+    DIRECT, /**< Direct deconvolution */
+};
+
 /** Padding mode to use for PadLayer */
 enum class PaddingMode
 {
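
The DeconvolutionMethod enum added above makes the choice between the GEMM-based and the direct path expressible as a type. As a rough, hedged sketch only: a caller could branch on the constraint documented on CLDeconvolutionReshapeOutputKernel (stride equal to the filter size, no padding) to decide which method is applicable. The helper name select_deconvolution_method and the standalone copy of the enum below are hypothetical; the library's real selection logic may differ.

    #include <cstddef>

    // Standalone stand-in for arm_compute::DeconvolutionMethod, for illustration only.
    enum class DeconvolutionMethod
    {
        GEMM,   // Deconvolution using GEMM
        DIRECT, // Direct deconvolution
    };

    // Hypothetical selection helper. It only mirrors the constraint documented on
    // CLDeconvolutionReshapeOutputKernel: the GEMM path needs
    // stride_x == filter_width, stride_y == filter_height and no padding.
    DeconvolutionMethod select_deconvolution_method(std::size_t filter_w, std::size_t filter_h,
                                                    std::size_t stride_x, std::size_t stride_y,
                                                    std::size_t pad_x, std::size_t pad_y)
    {
        const bool gemm_friendly = (stride_x == filter_w) && (stride_y == filter_h)
                                   && (pad_x == 0) && (pad_y == 0);
        return gemm_friendly ? DeconvolutionMethod::GEMM : DeconvolutionMethod::DIRECT;
    }
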