COMPMID-1413 - Improve the performance of GEMMLowp with 8 bit dot product on OpenCL
COMPMID-1424 - Add dot product support for CLDepthwise QASYMM8 3x3 NHWC non-unit stride

With this patch we improve the performance of MobileNet v1 QASYMM8 by 37%.
I also tried to use the dot product instruction in CLDepthwise QASYMM8 3x3 NHWC
non-unit stride, but did not see any benefit there (probably because that kernel
has few arithmetic operations and the number of load instructions does not change).
However, depthwise convolution has still been improved by 30%.
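
As a reference for users of the new API, below is a minimal sketch (not part of this
patch) showing how the fused output stage can be requested through the new
GEMMLowpOutputStageInfo. Tensor creation/allocation is omitted and the quantization
parameters are placeholders that depend on the layer's quantization info:

    // a, b: QASYMM8 CLTensors; bias: S32 CLTensor (may be nullptr); dst: QASYMM8 CLTensor
    GEMMLowpOutputStageInfo output_stage{};
    output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    output_stage.gemmlowp_offset     = 0;          // placeholder output offset
    output_stage.gemmlowp_multiplier = 1073741824; // placeholder fixed-point multiplier
    output_stage.gemmlowp_shift      = 8;          // placeholder shift
    output_stage.gemmlowp_min_bound  = 0;
    output_stage.gemmlowp_max_bound  = 255;

    GEMMInfo gemm_info(false, false, true /* reshape_b_only_on_first_run */,
                       1, false, false, output_stage);

    CLGEMMLowpMatrixMultiplyCore gemmlowp;
    gemmlowp.configure(&a, &b, &bias, &dst, gemm_info); // dst is QASYMM8 because output_stage.type != NONE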

Change-Id: Id768a99c2e53a04276707e427af5d0ec93419ada
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/155082
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index 1e456fa..36abb7b 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -70,6 +70,7 @@
 #include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h b/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
index 4592fc2..96b01b0 100644
--- a/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
@@ -68,14 +68,15 @@
      * @param[out] output                    Output tensor. Data type supported: same as @p input
      * @param[in]  mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleave block
      * @param[in]  reinterpret_input_as_3d   (Optional) True if the input has to be reinterpreted as 3D tensor
+     * @param[in]  unroll_block              (Optional) True if the 4x4 block has to be unrolled rather than transposed
      */
-    void configure(const ICLTensor *input, ICLTensor *output, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false);
+    void configure(const ICLTensor *input, ICLTensor *output, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false, bool unroll_block = false);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMInterleave4x4Kernel
      *
      * @param[in] input                     Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in] output                    Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
      * @param[in] mult_interleave4x4_height Multiplication factor for the height of the 4x4 interleave block
-     * @param[in] reinterpret_input_as_3d   (Optional) True if the input has to be reinterpreted as 3D tensor
+     * @param[in] reinterpret_input_as_3d   True if the input has to be reinterpreted as 3D tensor
      *
      * @return a status
      */
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
index 871b97c..e6b7917 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -58,16 +58,18 @@
     CLGEMMLowpOffsetContributionKernel &operator=(CLGEMMLowpOffsetContributionKernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-     * @param[in, out] mm_result      Input tensor containing the result of @ref CLGEMMLowpMatrixMultiplyKernel. Data type supported: S32
+     * @param[in, out] mm_result      Input tensor containing the result of @ref CLGEMMLowpMatrixMultiplyKernel
      * @param[in]      vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
      *                                Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
      * @param[in]      vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
      *                                Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
+     * @param[in]      bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+     *                                Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
      * @param[in]      k              Number of matrix A columns or Matrix B rows
      * @param[in]      a_offset       Offset to be added to each element of the matrix A.
      * @param[in]      b_offset       Offset to be added to each element of the matrix B.
      */
-    void configure(ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, int32_t k, int32_t a_offset, int32_t b_offset);
+    void configure(ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset, int32_t b_offset);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionKernel
      *
      * @param[in] mm_result      Input tensor containing the result of @ref CLGEMMLowpOffsetContributionKernel. Data type supported: S32
@@ -75,12 +77,14 @@
      *                           Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
      * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
      *                           Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
+     * @param[in] bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+     *                           Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
      * @param[in] a_offset       Offset to be added to each element of the matrix A.
      * @param[in] b_offset       Offset to be added to each element of the matrix B.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, int32_t a_offset, int32_t b_offset);
+    static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, int32_t a_offset, int32_t b_offset);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
@@ -89,6 +93,7 @@
     const ICLTensor *_vector_sum_col;
     const ICLTensor *_vector_sum_row;
     ICLTensor       *_mm_result;
+    const ICLTensor *_bias;
 };
 } // namespace arm_compute
 
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h
new file mode 100644
index 0000000..de06c88
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H__
+#define __ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** OpenCL kernel used to add the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel and perform the output stage.
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), adds to it the offset contribution
+ * of matrix A and matrix B and performs the output stage defined by the output_stage argument
+ *
+ */
+class CLGEMMLowpOffsetContributionOutputStageKernel : public ICLKernel
+{
+public:
+    /** Constructor */
+    CLGEMMLowpOffsetContributionOutputStageKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    CLGEMMLowpOffsetContributionOutputStageKernel(const CLGEMMLowpOffsetContributionOutputStageKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    CLGEMMLowpOffsetContributionOutputStageKernel &operator=(const CLGEMMLowpOffsetContributionOutputStageKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLGEMMLowpOffsetContributionOutputStageKernel(CLGEMMLowpOffsetContributionOutputStageKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLGEMMLowpOffsetContributionOutputStageKernel &operator=(CLGEMMLowpOffsetContributionOutputStageKernel &&) = default;
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  mm_result      Input tensor containing the result of @ref CLGEMMLowpMatrixMultiplyKernel. Data type supported: S32
+     * @param[in]  vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
+     *                            Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
+     * @param[in]  vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
+     *                            Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
+     * @param[in]  bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+     *                            Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[out] output         Output tensor. Data type supported: QASYMM8
+     * @param[in]  k              Number of matrix A columns or Matrix B rows
+     * @param[in]  a_offset       Offset to be added to each element of the matrix A.
+     * @param[in]  b_offset       Offset to be added to each element of the matrix B.
+     * @param[in]  output_stage   GEMMLowp output stage info
+     */
+    void configure(const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output, int32_t k, int32_t a_offset, int32_t b_offset,
+                   const GEMMLowpOutputStageInfo &output_stage);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionOutputStageKernel
+     *
+     * @param[in] mm_result      Input tensor containing the result of @ref CLGEMMLowpMatrixMultiplyKernel. Data type supported: S32 or QASYMM8 if output_stage != NONE
+     * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
+     *                           Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
+     * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
+     *                           Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
+     * @param[in] bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+     *                           Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output         Output tensor. Data type supported: QASYMM8
+     * @param[in] a_offset       Offset to be added to each element of the matrix A.
+     * @param[in] b_offset       Offset to be added to each element of the matrix B.
+     * @param[in] output_stage   GEMMLowp output stage info
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output, int32_t a_offset,
+                           int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    const ICLTensor *_mm_result;
+    const ICLTensor *_vector_sum_col;
+    const ICLTensor *_vector_sum_row;
+    const ICLTensor *_bias;
+    ICLTensor       *_output;
+};
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
index 1206206..72373b5 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
@@ -67,25 +67,22 @@
      * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8
      * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
      *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions
-     * @param[in]  output_3d_depth              (Optional) Depth of output in 3D (Defaults to 1)
      */
     void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
-                   int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+                   int min = 0, int max = 0);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
      *
-     * @param[in] input           Input tensor. Data type supported: S32
-     * @param[in] bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
-     *                            Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[in] output          Output tensor. Data type supported: Data type supported: QASYMM8
-     * @param[in] min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
-     * @param[in] max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+     * @param[in] input  Input tensor. Data type supported: S32
+     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output Output tensor. Data type supported: QASYMM8
+     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
      *                            Along with @p min, this value can be used to implement "rectified linear unit" activation functions
-     * @param[in] output_3d_depth (Optional)  Depth of output in 3D (Defaults to 1)
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
-                           int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
@@ -94,7 +91,6 @@
     const ICLTensor *_input;
     const ICLTensor *_bias;
     ICLTensor       *_output;
-    bool             _reinterpret_as_3d;
 };
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFIXEDPOINTKERNEL_H__ */
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 5e04bcd..134b8e2 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1205,6 +1205,26 @@
     const bool _reinterpret_input_as_3d;
 };
 
+/** GEMMLowp output stage type */
+enum class GEMMLowpOutputStageType
+{
+    NONE,                     /**< No quantization to uint8 */
+    QUANTIZE_DOWN,            /**< Quantize to uint8 using an integer multiplication */
+    QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
+    QUANTIZE_DOWN_FLOAT       /**< Quantize to uint8 using a floating point multiplication */
+};
+
+/** GEMMLowp output stage info */
+struct GEMMLowpOutputStageInfo
+{
+    GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
+    int                     gemmlowp_offset{ 0 };                  /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
+    int                     gemmlowp_multiplier{ 0 };              /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
+    int                     gemmlowp_shift{ 0 };                   /**< GEMMLowp output stage shift used for quantizing to uint8 */
+    int                     gemmlowp_min_bound{ 0 };               /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
+    int                     gemmlowp_max_bound{ 0 };               /**< GEMMLowp max value used to saturate up the output result before converting back to QASYMM8 */
+};
+
 /** GEMM information class. This class stores the necessary information to compute GEMM functions
  *
  * This object also contains the information about how matrix A and matrix B have been reshaped
@@ -1215,7 +1235,7 @@
 public:
     /** Default constructor */
     GEMMInfo()
-        : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(false), _depth_output_gemm3d(1), _reinterpret_input_as_3d(false), _retain_internal_weights(false)
+        : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(false), _depth_output_gemm3d(1), _reinterpret_input_as_3d(false), _retain_internal_weights(false), _gemmlowp_output_stage()
     {
     }
     /** Constructor
@@ -1227,11 +1247,13 @@
      * @param[in] reinterpret_input_as_3d     (Optional) Reinterpret the input as 3D tensor. (i.e. this flag should be set to true when GEMM is used
      *                                        to perform 1x1 convolutions with the NHWC data layout)
      * @param[in] retain_internal_weights     (Optional) Retain the weights tensor from previous run
+     * @param[in] gemmlowp_output_stage       (Optional) GEMMLowp Output stage info
      *
      */
-    GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 1, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false)
+    GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 1, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
+             GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo())
         : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run), _depth_output_gemm3d(depth_output_gemm3d),
-          _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights)
+          _reinterpret_input_as_3d(reinterpret_input_as_3d), _retain_internal_weights(retain_internal_weights), _gemmlowp_output_stage(gemmlowp_output_stage)
     {
     }
     /** Flag which specifies if the matrix A has been reshaped
@@ -1284,14 +1306,23 @@
     {
         return _retain_internal_weights;
     };
+    /** GEMMLowp output stage
+     *
+     * @return the GEMMLowp output stage info
+     */
+    GEMMLowpOutputStageInfo gemmlowp_output_stage() const
+    {
+        return _gemmlowp_output_stage;
+    };
 
 private:
-    const bool _is_a_reshaped;
-    const bool _is_b_reshaped;
-    const bool _reshape_b_only_on_first_run;
-    const int  _depth_output_gemm3d;
-    const bool _reinterpret_input_as_3d;
-    const bool _retain_internal_weights;
+    const bool                    _is_a_reshaped;
+    const bool                    _is_b_reshaped;
+    const bool                    _reshape_b_only_on_first_run;
+    const int                     _depth_output_gemm3d;
+    const bool                    _reinterpret_input_as_3d;
+    const bool                    _retain_internal_weights;
+    const GEMMLowpOutputStageInfo _gemmlowp_output_stage;
 };
 
 /** Winograd information */
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index cfd2736..e7fbbfe 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -927,6 +927,13 @@
  * @return The string describing the pooling type.
  */
 const std::string &string_from_pooling_type(PoolingType type);
+/** Translates a given GEMMLowp output stage to a string.
+ *
+ * @param[in] output_stage @ref GEMMLowpOutputStageType to be translated to string.
+ *
+ * @return The string describing the GEMMLowp output stage
+ */
+const std::string &string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage);
 /** Convert a PixelValue to a string, represented through the specific data type
  *
  * @param[in] value     The PixelValue to convert
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 48b8801..fbf0c08 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -157,43 +157,48 @@
 private:
     /** Configures the appropriate matrix multiply routine
      *
-     * @param[in]      input         Input tensor. Data types supported: QASYMM8/F16/F32.
-     * @param[in]      weights       Weights tensor. Data type supported: Same as @p input.
-     * @param[in, out] output        Output tensor. Data types supported: Same as @p input,
-     *                               except for input of QASYMM8 type where output should be of S32 type.
-     * @param[in]      gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
+     * @param[in]      input                 Input tensor. Data types supported: QASYMM8/F16/F32.
+     * @param[in]      weights               Weights tensor. Data type supported: Same as @p input.
+     * @param[in]      biases                Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+     *                                       Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
+     * @param[in, out] output                Output tensor. Data types supported: Same as @p input,
+     *                                       except for input of QASYMM8 type where output should be of S32 type.
+     * @param[in]      gemmlowp_output_stage GEMMLowp output stage info
+     * @param[in]      gemm_3d_depth         (Optional) Depth of GEMM 3D (Defaults to 1)
      */
-    void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, int gemm_3d_depth = 1);
+    void configure_mm(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth = 1);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
      *
-     * @param[in] input         Input tensor. Data types supported: QASYMM8/F16/F32.
-     * @param[in] weights       Weights tensor. Data type supported: Same as @p input.
-     * @param[in] output        Output tensor. Data types supported: Same as @p input,
-     *                          except for input of QASYMM8 type where output should be of S32 type.
-     * @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
-     * @param[in] skip_im2col   (Optional) Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout. (Default to false)
+     * @param[in] input                 Input tensor. Data types supported: QASYMM8/F16/F32.
+     * @param[in] weights               Weights tensor. Data type supported: Same as @p input.
+     * @param[in] biases                Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+     *                                  Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
+     * @param[in] output                Output tensor. Data types supported: Same as @p input,
+     *                                  except for input of QASYMM8 type where output should be of S32 type.
+     * @param[in] gemmlowp_output_stage GEMMLowp output stage info
+     * @param[in] gemm_3d_depth         (Optional) Depth of GEMM 3D (Defaults to 1)
+     * @param[in] skip_im2col           (Optional) Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout. (Default to false)
      *
      * @return a status
      */
-    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, int gemm_3d_depth = 1, bool skip_im2col = false);
+    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+                              int gemm_3d_depth = 1, bool skip_im2col = false);
 
 private:
-    CLMemoryGroup                                  _memory_group;
-    CLConvolutionLayerReshapeWeights               _reshape_weights;
-    CLIm2ColKernel                                 _im2col_kernel;
-    CLGEMM                                         _mm_gemm;
-    CLGEMMLowpMatrixMultiplyCore                   _mm_gemmlowp;
-    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat _gemmlowp_output_stage;
-    CLCol2ImKernel                                 _col2im_kernel;
-    CLActivationLayer                              _activationlayer_function;
-    CLArithmeticAdditionKernel                     _add_bias_kernel;
+    CLMemoryGroup                    _memory_group;
+    CLConvolutionLayerReshapeWeights _reshape_weights;
+    CLIm2ColKernel                   _im2col_kernel;
+    CLGEMM                           _mm_gemm;
+    CLGEMMLowpMatrixMultiplyCore     _mm_gemmlowp;
+    CLCol2ImKernel                   _col2im_kernel;
+    CLActivationLayer                _activationlayer_function;
+    CLArithmeticAdditionKernel       _add_bias_kernel;
 
     const ICLTensor *_original_weights;
 
     CLTensor _im2col_output;
     CLTensor _weights_reshaped;
     CLTensor _gemm_output;
-    CLTensor _tmp_output;
 
     DataLayout _data_layout;
 
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
index f404ccd..82f307a 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
@@ -27,6 +27,7 @@
 #include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
@@ -45,7 +46,8 @@
  *  -# @ref CLGEMMLowpMatrixMultiplyKernel
  *  -# @ref CLGEMMLowpMatrixAReductionKernel (if the offset of matrix B is not 0)
  *  -# @ref CLGEMMLowpMatrixBReductionKernel (if the offset of matrix A is not 0)
- *  -# @ref CLGEMMLowpOffsetContributionKernel
+ *  -# @ref CLGEMMLowpOffsetContributionKernel (if gemm_info.gemmlowp_output_stage == NONE)
+ *  -# @ref CLGEMMLowpOffsetContributionOutputStageKernel (if gemm_info.gemmlowp_output_stage != NONE)
  *
 */
 class CLGEMMLowpMatrixMultiplyCore : public IFunction
@@ -63,54 +65,60 @@
     CLGEMMLowpMatrixMultiplyCore &operator=(CLGEMMLowpMatrixMultiplyCore &&) = default;
     /** Initialise the kernel's inputs, output
      *
-     * @note GEMM_LOWP:  low precision GEMM kernel
+     * @note GEMMLowp:  low precision GEMM kernel. [A * B + C]
      *  This kernel performs the following computations:
      *
      *  -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
     *  -# Convert b values from QASYMM8 to int32 and add b_offset to each of them.
      *  -# Compute the matrix product of the resulting a * b in int32.
+     *  -# Quantize to uint8 if gemm_info.gemmlowp_output_stage != NONE
      *
      * @param[in]  a         First input tensor  (Matrix A). Data type supported: QASYMM8.
      * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
-     * @param[out] output    Output tensor. Data type supported: Data type supported: S32
+     * @param[in]  c         Third input tensor  (Matrix C). It can be a nullptr. Data type supported: S32
+     * @param[out] output    Output tensor. Data type supported: S32 or QASYMM8 if gemm_info.gemmlowp_output_stage != NONE
      * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
      *                       if the reshape of matrix B should be executed only for the first run
      */
-    void configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, const GEMMInfo &gemm_info = GEMMInfo());
+    void configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info = GEMMInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyCore
      *
      * @param[in] a         First input tensor  (Matrix A). Data type supported: QASYMM8.
      * @param[in] b         Second input tensor (Matrix B). Data type supported: same as @p a
-     * @param[in] output    Output tensor. Data type supported: Data type supported: S32
+     * @param[in] c         Third input tensor  (Matrix C). It can be a nullptr. Data type supported: S32
+     * @param[in] output    Output tensor. Data type supported: S32 or QASYMM8 if gemm_info.gemmlowp_output_stage != NONE
      * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
      *                      if the reshape of matrix B should be executed only for the first run
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo());
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo());
 
     // Inherited methods overridden:
     void run() override;
     void prepare() override;
 
 private:
-    CLMemoryGroup                      _memory_group;
-    CLGEMMLowpMatrixMultiplyKernel     _mm_kernel;
-    CLGEMMInterleave4x4Kernel          _mtx_a_reshape_kernel;
-    CLGEMMTranspose1xWKernel           _mtx_b_reshape_kernel;
-    CLGEMMLowpMatrixAReductionKernel   _mtx_a_reduction_kernel;
-    CLGEMMLowpMatrixBReductionKernel   _mtx_b_reduction_kernel;
-    CLGEMMLowpOffsetContributionKernel _offset_contribution_kernel;
-    CLTensor                           _vector_sum_col;
-    CLTensor                           _vector_sum_row;
-    CLTensor                           _tmp_a;
-    CLTensor                           _tmp_b;
-    const ICLTensor                   *_original_b;
-    int32_t                            _a_offset;
-    int32_t                            _b_offset;
-    bool                               _is_interleaved_transposed;
-    bool                               _reshape_b_only_on_first_run;
-    bool                               _is_prepared;
+    CLMemoryGroup                                 _memory_group;
+    CLGEMMLowpMatrixMultiplyKernel                _mm_kernel;
+    CLGEMMInterleave4x4Kernel                     _mtx_a_reshape_kernel;
+    CLGEMMTranspose1xWKernel                      _mtx_b_reshape_kernel;
+    CLGEMMLowpMatrixAReductionKernel              _mtx_a_reduction_kernel;
+    CLGEMMLowpMatrixBReductionKernel              _mtx_b_reduction_kernel;
+    CLGEMMLowpOffsetContributionKernel            _offset_contribution_kernel;
+    CLGEMMLowpOffsetContributionOutputStageKernel _offset_contribution_output_stage_kernel;
+    CLTensor                                      _vector_sum_col;
+    CLTensor                                      _vector_sum_row;
+    CLTensor                                      _tmp_a;
+    CLTensor                                      _tmp_b;
+    CLTensor                                      _mm_result_s32;
+    const ICLTensor                              *_original_b;
+    int32_t                                       _a_offset;
+    int32_t                                       _b_offset;
+    bool                                          _is_interleaved_transposed;
+    bool                                          _reshape_b_only_on_first_run;
+    bool                                          _is_prepared;
+    bool                                          _fuse_output_stage;
 };
 }
 #endif /*__ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
index 51fcbe9..3330b40 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
@@ -131,24 +131,22 @@
      * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8
      * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
      *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions
-     * @param[in]  output_3d_depth              (Optional) Depth of output in 3D (Defaults to 1)
      */
     void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
-                   int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+                   int min = 0, int max = 0);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint
      *
-     * @param[in] input           Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
-     * @param[in] bias            Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
-     *                            Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[in] output          Output tensor. Data type supported: Data type supported: QASYMM8
-     * @param[in] min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
-     * @param[in] max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output Output tensor. Data type supported: QASYMM8
+     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
      *                            Along with @p min, this value can be used to implement "rectified linear unit" activation functions
-     * @param[in] output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0, unsigned int output_3d_depth = 1);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
 };
 
 /** Basic function to execute CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat on OpenCL.
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 3db76f4..682475c 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -75,22 +75,24 @@
      *
      * @param[in]  a         First input tensor  (Matrix A). Data type supported: QASYMM8.
      * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[in]  c         Third input tensor  (Matrix C). It can be a nullptr. Data type supported: S32
     * @param[out] output    Output tensor. Data type supported: S32
      * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
      *                       if the reshape of matrix B should be executed only for the first run
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *output, const GEMMInfo &gemm_info = GEMMInfo());
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info = GEMMInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
      *
      * @param[in] a         First input tensor  (Matrix A). Data type supported: QASYMM8.
      * @param[in] b         Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[in] c         Third input tensor  (Matrix C). It can be a nullptr. Data type supported: S32
     * @param[in] output    Output tensor. Data type supported: S32
      * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
      *                      if the reshape of matrix B should be executed only for the first run
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo());
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo());
 
     // Inherited methods overridden
     void run() override;