COMPMID-856: CL Depthwise Convolution QASYMM8 support
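
This adds QASYMM8 (8-bit asymmetric quantized) support to the CL depthwise
convolution. The quantized path reuses the existing pipeline -- im2col,
weights reshape, GEMM matrix-vector multiply (accumulating in S32), vector
to tensor -- and appends a new output stage kernel that adds the S32 biases
and downscales the accumulators back to QASYMM8. The downscale follows the
usual gemmlowp-style fixed-point requantization; below is a minimal scalar
sketch of what the three new parameters encode (illustrative only, ignoring
the exact rounding rules the OpenCL kernel applies to negative values):

    #include <algorithm>
    #include <cstdint>

    // acc: S32 accumulator, bias already added. multiplier/shift/offset map
    // onto result_fixedpoint_multiplier, result_shift and
    // result_offset_after_shift of the new output stage kernel.
    uint8_t downscale_qasymm8(int32_t acc, int32_t multiplier, int32_t shift, int32_t offset)
    {
        // Fixed point multiply: (acc * multiplier) / 2^31, rounded to nearest.
        const int64_t product = static_cast<int64_t>(acc) * multiplier;
        int32_t       result  = static_cast<int32_t>((product + (1ll << 30)) >> 31);
        // Rounding right shift by 'shift' bits.
        if(shift > 0)
        {
            result = (result + (1 << (shift - 1))) >> shift;
        }
        // Re-apply the output zero point and saturate to [0, 255].
        return static_cast<uint8_t>(std::min(255, std::max(0, result + offset)));
    }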

Change-Id: Ic6097e7cf160e8b829fb521b7b99d9a57d9799d3
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/118774
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index e9a1fde..e7fa74d 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -53,6 +53,7 @@
 #include "arm_compute/core/CL/kernels/CLDerivativeKernel.h"
 #include "arm_compute/core/CL/kernels/CLDilateKernel.h"
 #include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
+#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h"
 #include "arm_compute/core/CL/kernels/CLErodeKernel.h"
 #include "arm_compute/core/CL/kernels/CLFastCornersKernel.h"
 #include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h
index 7e786e8..86b8b90 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -51,7 +51,7 @@
     /** Set the input and output of the kernel.
      *
      * @param[in]  input       The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
-     *                         while every optional dimension from 4 and above represent a batch of inputs. Data types supported: F32
+     *                         while every optional dimension from 4 and above represents a batch of inputs. Data types supported: QASYMM8/F32
      * @param[out] output      The output tensor. First 3 lower dimensions represent a transform of each 3D input,
      *                         while every dimension above 3 represents a batch. Data types supported: Same as @p input
      * @param[in]  kernel_dims The kernel dimensions (width and height).
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h
index 1dae9b2..36d7cad 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -52,7 +52,7 @@
     CLDepthwiseVectorToTensorKernel &operator=(CLDepthwiseVectorToTensorKernel &&) = default;
     /** Set the input and output of the kernel.
      *
-     * @param[in]  input  The input vector to convert. Data type supported: F32.
+     * @param[in]  input  The input vector to convert. Data type supported: QASYMM8/S32/F16/F32.
      * @param[out] output The output tensor. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: same as @p input.
      * @param[in]  conv_w The converted tensor's width.
      * @param[in]  conv_h The converted tensor's height.
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h
index 7989257..c8d1e2f 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -50,7 +50,8 @@
     CLDepthwiseWeightsReshapeKernel &operator=(CLDepthwiseWeightsReshapeKernel &&) = default;
     /** Set the input and output of the kernel.
      *
-     * @param[in]  input  The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: F32.
+     * @param[in]  input  The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM].
+     *                    Data type supported: QASYMM8/F32.
      * @param[out] output The output tensor. Data type supported: same as @p input.
      * @param[in]  biases (Optional) The input biases to add. Shape [IFM]. Data type supported: same as @p input.
      */
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
new file mode 100644
index 0000000..9340e9a
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H__
+#define __ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+/** OpenCL kernel to accumulate the biases, if provided, and/or downscale in the case of quantized input.
+ *
+ * @note We assume bias to be shared
+ */
+class CLDirectConvolutionLayerOutputStageKernel : public ICLKernel
+{
+public:
+    /** Default constructor */
+    CLDirectConvolutionLayerOutputStageKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDirectConvolutionLayerOutputStageKernel(const CLDirectConvolutionLayerOutputStageKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDirectConvolutionLayerOutputStageKernel &operator=(const CLDirectConvolutionLayerOutputStageKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLDirectConvolutionLayerOutputStageKernel(CLDirectConvolutionLayerOutputStageKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLDirectConvolutionLayerOutputStageKernel &operator=(CLDirectConvolutionLayerOutputStageKernel &&) = default;
+    /** Default destructor */
+    ~CLDirectConvolutionLayerOutputStageKernel() = default;
+    /** Set the accumulate buffer and the biases of the kernel.
+     *
+     * @param[in, out] input                        Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+     *                                              Data type supported: S32/QS16/QS32/F16/F32
+     * @param[in]      bias                         (Optional) The shared bias tensor to add. It must be a 1D tensor. Data type supported: Same as @p input
+     * @param[out]     output                       (Optional) If the output tensor is specified the accumulation is done out-of-place (defaults to nullptr).
+     *                                              Required when the final output is of QASYMM8 type.
+     *                                              Data types supported: QS8/QASYMM8/QS16/F16/F32
+     * @param[in]      result_fixedpoint_multiplier (Optional) Fixed point value that each element of the input is multiplied by once the result_offset has been added
+     * @param[in]      result_shift                 (Optional) Number of bits to shift right (rounding to nearest) the result of the fixed point multiplication
+     * @param[in]      result_offset_after_shift    (Optional) Offset applied to the result before converting it back to QASYMM8
+     */
+    void configure(ICLTensor *input, const ICLTensor *bias = nullptr, ICLTensor *output = nullptr,
+                   int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerOutputStageKernel
+     *
+     * @param[in] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+     *                   Data type supported: S32/QS16/QS32/F16/F32
+     * @param[in] bias   (Optional) The shared bias tensor to add. It must be a 1D tensor. Data type supported: Same as @p input
+     * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place (defaults to nullptr).
+     *                   Data types supported: QS8/QASYMM8/QS16/F16/F32
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    ICLTensor       *_input;
+    const ICLTensor *_bias;
+    ICLTensor       *_output;
+    int              _result_fixedpoint_multiplier;
+    int              _result_shift;
+    int              _result_offset_after_shift;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H__ */
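
A hedged usage sketch of the kernel above (the helper name and scheduler flow
are assumptions, not part of this patch): S32 accumulators and biases are
combined and requantized into a QASYMM8 destination.

    #include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"

    using namespace arm_compute;

    // acc/bias are S32, dst is QASYMM8. The three integers encode the float
    // rescale (input_scale * weights_scale / output_scale) as
    // multiplier * 2^-31 * 2^-shift, plus the output zero point.
    void run_output_stage(ICLTensor *acc, const ICLTensor *bias, ICLTensor *dst,
                          int multiplier, int shift, int output_offset)
    {
        CLDirectConvolutionLayerOutputStageKernel stage;
        stage.configure(acc, bias, dst, multiplier, shift, output_offset);
        CLScheduler::get().enqueue(stage); // runs over the kernel's full window
    }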
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h
index 580322f..c27307e 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -46,8 +46,8 @@
     CLGEMMMatrixVectorMultiplyKernel &operator=(CLGEMMMatrixVectorMultiplyKernel &&) = default;
     /** Set the input and output of the kernel.
      *
-     * @param[in]  input0 The reshaped input tensor. Data types supported: F16/F32
-     * @param[in]  input1 The 2D reshaped weights tensor. Data type supported: Same as @p input.
-     * @param[out] output The output 2D tensor. Data types supported: Same as @p input
+     * @param[in]  input0 The reshaped input tensor. Data types supported: QASYMM8/F16/F32
+     * @param[in]  input1 The 2D reshaped weights tensor. Data type supported: Same as @p input0.
+     * @param[out] output The output 2D tensor. Data types supported: Same as @p input0, S32 for QASYMM8 input.
      */
     void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output);
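
The S32 output for QASYMM8 input falls out of how the quantized multiply
accumulates: both operands are corrected by their zero points and the
products are summed in 32 bits. A scalar model of one output element
(illustrative, not the OpenCL code):

    #include <cstddef>
    #include <cstdint>

    // One row of the reshaped weights against the im2col'd input vector.
    int32_t dot_qasymm8(const uint8_t *row, const uint8_t *vec, size_t n,
                        int32_t row_offset, int32_t vec_offset)
    {
        int32_t acc = 0;
        for(size_t i = 0; i < n; ++i)
        {
            acc += (static_cast<int32_t>(row[i]) - row_offset) * (static_cast<int32_t>(vec[i]) - vec_offset);
        }
        return acc; // requantized back to QASYMM8 by the output stage kernel
    }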
diff --git a/arm_compute/core/NEON/kernels/NEGEMMMatrixVectorMultiplyKernel.h b/arm_compute/core/NEON/kernels/NEGEMMMatrixVectorMultiplyKernel.h
index 95fe916..286be1a 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMMatrixVectorMultiplyKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMMatrixVectorMultiplyKernel.h
@@ -51,7 +51,7 @@
      *
      * @param[in]  input0 First Input tensor. Data types supported: QASYMM8/F32
      * @param[in]  input1 Second Input tensor. Data types supported: same as @p input.
-     * @param[out] output Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
+     * @param[out] output Output tensor which stores the result of the multiplication. Data type supported: same as @p input0, S32 for QASYMM8 input.
      */
     void configure(const ITensor *input0, const ITensor *input1, ITensor *output);
 
diff --git a/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
index f789941..b34967d 100644
--- a/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -28,6 +28,7 @@
 #include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h"
 #include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h"
 #include "arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h"
+#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h"
 #include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h"
 #include "arm_compute/core/Types.h"
@@ -83,10 +84,10 @@
     CLDepthwiseConvolutionLayer();
     /** Initialize the function's source, destination, weights and convolution information.
      *
-     * @param[in, out] input     Source tensor. Data type supported: F32. (Written to only for border filling).
+     * @param[in, out] input     Source tensor. Data type supported: QASYMM8/F32. (Written to only for border filling).
      * @param[in]      weights   Weights tensor. These are 3D tensors with shape [kernel_x, kernel_y, IFM]. Data type supported: Same as @p input.
      * @param[in]      biases    (Optional) Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
-     *                           Data type supported: Same as @p input.
+     *                           Data type supported: Same as @p input, S32 when input is QASYMM8.
      * @param[out]     output    Destination tensor. Data type supported: same as @p input.
      * @param[in]      conv_info Padding and stride information to use for the convolution.
      */
@@ -96,15 +97,18 @@
     void run() override;
 
 private:
-    CLDepthwiseIm2ColKernel          _im2col_kernel;
-    CLDepthwiseWeightsReshapeKernel  _weights_reshape_kernel;
-    CLGEMMMatrixVectorMultiplyKernel _v2mm_kernel;
-    CLDepthwiseVectorToTensorKernel  _vector_to_tensor_kernel;
-    CLFillBorderKernel               _v2mm_input_fill_border;
-    CLFillBorderKernel               _v2mm_weights_fill_border;
-    CLTensor                         _input_reshaped;
-    CLTensor                         _weights_reshaped;
-    CLTensor                         _v2mm_output;
+    CLDepthwiseIm2ColKernel                   _im2col_kernel;
+    CLDepthwiseWeightsReshapeKernel           _weights_reshape_kernel;
+    CLGEMMMatrixVectorMultiplyKernel          _v2mm_kernel;
+    CLDepthwiseVectorToTensorKernel           _vector_to_tensor_kernel;
+    CLDirectConvolutionLayerOutputStageKernel _output_stage_kernel;
+    CLFillBorderKernel                        _v2mm_input_fill_border;
+    CLFillBorderKernel                        _v2mm_weights_fill_border;
+    CLTensor                                  _input_reshaped;
+    CLTensor                                  _weights_reshaped;
+    CLTensor                                  _v2mm_output;
+    CLTensor                                  _output_reshaped;
+    bool                                      _is_quantized;
 };
 }
 #endif /*__ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H__ */
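
A hedged end-to-end sketch of the quantized CL path (shapes and quantization
parameters are made up for illustration):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // QASYMM8 source/weights/destination, S32 biases of shape [IFM].
        CLTensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 3)));
        biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::S32));
        dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f, 0)));

        CLDepthwiseConvolutionLayer dwc;
        dwc.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1)); // 3x3 kernel, stride 1, same padding

        for(auto *t : { &src, &weights, &biases, &dst })
        {
            t->allocator()->allocate();
        }
        // ... map the tensors and fill them with quantized data ...
        dwc.run();
        CLScheduler::get().sync();
        return 0;
    }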
diff --git a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
index 1c65b3c..f5805f5 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
@@ -103,7 +103,7 @@
      * @param[out]     output    Destination tensor. Data type supported: same as @p input.
      * @param[in]      weights   Weights tensor. These are 3D tensors with shape [kernel_x, kernel_y, IFM]. Data type supported: Same as @p input.
      * @param[in]      biases    (Optional) Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
-     *                           Data type supported: Same as @p input.
+     *                           Data type supported: Same as @p input, S32 when input is QASYMM8.
      * @param[in]      conv_info Padding and stride information to use for the convolution.
      */
     void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info);
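
The NEON function mirrors this: with a QASYMM8 input the biases must be S32,
since a quantized bias is the real bias divided by input_scale *
weights_scale. A hedged configure sketch (tensor setup omitted):

    #include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // src/weights/dst are QASYMM8 tensors, biases is an S32 tensor of shape [IFM].
    void configure_quantized(NEDepthwiseConvolutionLayer &dwc, Tensor &src,
                             Tensor &weights, Tensor &biases, Tensor &dst)
    {
        dwc.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
    }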