COMPMID-616 - Optimizing GEMMLowp on NEON intrinsics

Change-Id: Ibbeff5d37249b6e8fc34ad496035a1511c9da5a3
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94072
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
diff --git a/arm_compute/core/Dimensions.h b/arm_compute/core/Dimensions.h
index 96dd371..70b6e1a 100644
--- a/arm_compute/core/Dimensions.h
+++ b/arm_compute/core/Dimensions.h
@@ -119,8 +119,8 @@
 
     /** Collapse dimensions.
      *
-     * @param[in] first Dimensions into which the following @p n are collapsed.
      * @param[in] n     Number of dimensions to collapse into @p first.
+     * @param[in] first Dimensions into which the following @p n are collapsed.
      */
     void collapse(size_t n, size_t first = 0)
     {
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 8d8ecda..918dfc6 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -62,7 +62,9 @@
 #include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
index aa942c4..cdeb11d 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
@@ -46,7 +46,7 @@
      * @param[in]  input        Input tensor. Data types supported: U8
      * @param[out] output       Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
      * @param[in]  block_height The height of the blocks to be interleaved.
-     * @param[in]  block_width  The width of the blocks to be interleved.
+     * @param[in]  block_width  The width of the blocks to be interleaved.
      * @param[in]  transpose    True if transpose operation must be performed, false otherwise.
      */
     void configure(const ITensor *input, ITensor *output, unsigned int block_height, unsigned int block_width, bool transpose);
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h
new file mode 100644
index 0000000..77b2bdc
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPFINALIZEKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPFINALIZEKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NEON kernel used to finalize the GEMMLowp result
+ *
+ * This kernel performs the following computations:
+ *
+ *  -# Add offset terms to final result
+ *  -# Multiply each entry of result and round to nearest integer
+ *  -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
+ */
+class NEGEMMLowpFinalizeKernel : public INEKernel
+{
+public:
+    /** Constructor */
+    NEGEMMLowpFinalizeKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    NEGEMMLowpFinalizeKernel(const NEGEMMLowpFinalizeKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    NEGEMMLowpFinalizeKernel &operator=(const NEGEMMLowpFinalizeKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    NEGEMMLowpFinalizeKernel(NEGEMMLowpFinalizeKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    NEGEMMLowpFinalizeKernel &operator=(NEGEMMLowpFinalizeKernel &&) = default;
+    /** Initialise the kernel's input and output.
+     *
+     * @note The input row-vectors  @p vector_sum_col and @p vector_sum_row must be the output of @ref NEGEMMLowpMatrixBReductionKernel and @ref NEGEMMLowpMatrixAReductionKernel kernels.
+     *       These 2 vectors are needed to handle the offset of matrix product
+     *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
+     *
+     * @param[in]  vector_sum_col Input row-vector of sums of all the entries in each column of input1.
+     *                            Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: S32
+     * @param[in]  vector_sum_row Input row-vector of sums of all the entries in each row of input0.
+     *                            Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p vector_sum_col
+     * @param[in]  mm_result      Input tensor containing the result of @ref NEGEMMLowpMatrixMultiplyKernel. Data type supported: same as @p vector_sum_col
+     * @param[out] output         Output tensor containing the result of GEMMLowP. Data type supported: U8
+     * @param[in]  num_mtx_a_cols Number of matrix A columns
+     * @param[in]  a_offset       Offset to be added to each element of the matrix A.
+     * @param[in]  b_offset       Offset to be added to each element of the matrix B.
+     * @param[in]  c_offset       Offset to be added to each element of the output matrix
+     * @param[in]  c_mult_int     Value to be multiplied with each entry of the result.
+     * @param[in]  shift          Number of bits to shift right the result.
+     */
+    void configure(const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *mm_result, ITensor *output, int32_t num_mtx_a_cols, int32_t a_offset, int32_t b_offset, int32_t c_offset,
+                   int32_t c_mult_int, int32_t shift);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+    /** Template function to run the finalize kernel
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <bool add_a_offset, bool add_b_offset>
+    void finalize(const Window &window);
+    using FinalizeFunctionPtr = void (NEGEMMLowpFinalizeKernel::*)(const Window &window);
+
+    FinalizeFunctionPtr _func;
+    const ITensor      *_vector_sum_col;
+    const ITensor      *_vector_sum_row;
+    const ITensor      *_mm_result;
+    ITensor            *_output;
+    int32_t             _a_offset;
+    int32_t             _b_offset;
+    int32_t             _c_offset;
+    int32_t             _k_offset;
+    int32_t             _c_mult_int;
+    int32_t             _shift;
+    bool                _slide_vector_sum_col;
+};
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_NEGEMMLOWPFINALIZEKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index f526d21..670274b 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -35,12 +35,9 @@
  * @note @ref NEGEMMLowpMatrixMultiplyKernel low precision matrix product kernel
  *  This kernel performs the following computation:
  *
- *  -# Convert a values from uint8 to int32 and add a_offset to each of them.
- *  -# Convert b values from uint8 to int32 and add b_offset to each of them.
- *  -# Compute the int32 matrix product of the resulting a * b.
- *  -# Add output_offset to each entry of the result.
- *  -# Multiply each entry of the result and round to the nearest integer
- *  -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
+ *  -# Convert a values from uint8 to int32
+ *  -# Convert b values from uint8 to int32
+ *  -# Compute the int32 matrix product of the resulting a * b and store the result as int32
  *
  */
 class NEGEMMLowpMatrixMultiplyKernel : public INEKernel
@@ -61,16 +58,12 @@
      * The input matrices @p input0 and @p input1 must be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel. These two
      * kernels change the layout of the original matrices to be more cache-friendly.
      *
-     * @param[in]  input0          Input tensor containing the interleaved Matrix A. Data type supported: U8
-     * @param[in]  input1          Input tensor containing the transposed Matrix B. Data type supported: same as @p input0
-     * @param[out] output          Output tensor to store the result of matrix multiplication, Data type supported: same as @p input0
-     * @param[in]  a_offset        Offset to be added to each element of the matrix A.
-     * @param[in]  b_offset        Offset to be added to each element of the matrix B.
-     * @param[in]  output_offset   Offset to be added to each element of the output matrix
-     * @param[in]  output_mult_int Value to be multipied to each entry of the result.
-     * @param[in]  shift           Number of bits to shift right the result.
+     * @param[in]  input0 Input tensor containing the interleaved Matrix A. Data type supported: U8
+     * @param[in]  input1 Input tensor containing the transposed Matrix B. Data type supported: same as @p input0
+     * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
      */
-    void configure(const ITensor *input0, const ITensor *input1, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
+    void configure(const ITensor *input0, const ITensor *input1, ITensor *output);
+
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
@@ -78,11 +71,7 @@
     const ITensor *_input0;
     const ITensor *_input1;
     ITensor       *_output;
-    int32_t        _a_offset;
-    int32_t        _b_offset;
-    int32_t        _output_offset;
-    int32_t        _output_mult_int;
-    int32_t        _shift;
+    bool           _slide_matrix_b;
 };
 } // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYKERNEL_H__*/
\ No newline at end of file
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
new file mode 100644
index 0000000..143e8b9
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPREDUCTIONKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPREDUCTIONKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Common interface for all NEON reduction kernels */
+class INEGEMMLowpReductionKernel : public INEKernel
+{
+public:
+    /** Constructor */
+    INEGEMMLowpReductionKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    INEGEMMLowpReductionKernel(const INEGEMMLowpReductionKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers)*/
+    INEGEMMLowpReductionKernel &operator=(const INEGEMMLowpReductionKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    INEGEMMLowpReductionKernel(INEGEMMLowpReductionKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    INEGEMMLowpReductionKernel &operator=(INEGEMMLowpReductionKernel &&) = default;
+
+public:
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  input       Input tensor containing the interleaved or transposed matrix. Data type supported: U8
+     * @param[out] output      Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
+     * @param[in]  k           Number of matrix A columns (or matrix B rows)
+     * @param[in]  is_reshaped True if the input tensor has been reshaped
+     */
+    virtual void configure(const ITensor *input, ITensor *output, int32_t k, bool is_reshaped) = 0;
+
+protected:
+    const ITensor *_input;
+    ITensor       *_output;
+    int32_t        _k;
+    bool           _is_reshaped;
+};
+
+/** NEON kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
+ *
+ * @note This stage is needed to handle the offset of matrix product
+ *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
+ */
+class NEGEMMLowpMatrixAReductionKernel : public INEGEMMLowpReductionKernel
+{
+public:
+    /** Initialise the kernel's input and output.
+     *
+     * @note The input matrix @p mtx_a_interleaved4x4 must be the output of @ref NEGEMMInterleave4x4Kernel.
+     *
+     * @param[in]  mtx_a_interleaved4x4 Input tensor containing the interleaved Matrix A. Data type supported: U8
+     * @param[out] vector_sum_row       Output row-vector of sums of all the entries in each row of mtx_a_interleaved4x4. Data type supported: S32
+     * @param[in]  num_mtx_a_cols       Number of matrix A columns
+     * @param[in]  is_interleaved4x4    True if the input tensor is interleaved4x4
+     */
+    void configure(const ITensor *mtx_a_interleaved4x4, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4) override;
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+};
+
+/** NEON kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
+ *
+ * @note This stage is needed to handle the offset of matrix product
+ *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
+ */
+class NEGEMMLowpMatrixBReductionKernel : public INEGEMMLowpReductionKernel
+{
+public:
+    /** Initialise the kernel's input and output.
+     *
+     * @note The input matrix @p mtx_b_transposed1xW must be the output of @ref NEGEMMTranspose1xWKernel kernel.
+     *
+     * @param[in]  mtx_b_transposed1xW Input tensor containing the transposed Matrix B. Data type supported: U8
+     * @param[out] vector_sum_col      Output row-vector of sums of all the entries in each column of mtx_b_transposed1xW. Data type supported: S32
+     * @param[in]  num_mtx_b_rows      Number of matrix B rows
+     * @param[in]  is_transposed1xW    True if the input tensor is transposed 1xW
+     */
+    void configure(const ITensor *mtx_b_transposed1xW, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW) override;
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+};
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_NEGEMMLOWPREDUCTIONKERNEL_H__ */
diff --git a/arm_compute/core/TensorShape.h b/arm_compute/core/TensorShape.h
index 3b395e7..ad10260 100644
--- a/arm_compute/core/TensorShape.h
+++ b/arm_compute/core/TensorShape.h
@@ -117,8 +117,8 @@
 
     /** Collapse the first n dimensions.
      *
+     * @param[in] n     Number of dimensions to collapse into @p first.
      * @param[in] first Dimensions into which the following @p n are collapsed.
-     * @param[in] n     Number of dimensions to collapse into @p first.
      */
     void collapse(size_t n, size_t first = 0)
     {