COMPMID-616 - Optimizing GEMMLowp on NEON intrinsics

Change-Id: Ibbeff5d37249b6e8fc34ad496035a1511c9da5a3
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94072
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index c614e50..563ade2 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -61,6 +61,7 @@
 #include "arm_compute/runtime/NEON/functions/NEGEMM.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h"
 #include "arm_compute/runtime/NEON/functions/NEGaussian3x3.h"
 #include "arm_compute/runtime/NEON/functions/NEGaussian5x5.h"
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
index 84850db..865c729 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
@@ -25,17 +25,13 @@
 #define __ARM_COMPUTE_NEGEMMLOWP_H__
 
 #include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
 #include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/Tensor.h"
-
-#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "arm_compute/runtime/IMemoryManager.h"
 #include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/Tensor.h"
 
 #include <memory>
 
@@ -43,12 +39,13 @@
 {
 class ITensor;
 
-/** Basic function to execute GEMMLowp on NEON. This function calls the following NEON kernels:
-*
-*  -# @ref NEGEMMInterleave4x4Kernel
-*  -# @ref NEGEMMTranspose1xWKernel
-*  -# @ref NEGEMMLowpMatrixMultiplyKernel
-*
+/** Basic function to execute GEMMLowp on NEON. This function calls the following NEON kernels/function:
+ *
+ *  -# @ref NEGEMMLowpMatrixAReductionKernel
+ *  -# @ref NEGEMMLowpMatrixBReductionKernel
+ *  -# @ref NEGEMMLowpMatrixMultiplyCore
+ *  -# @ref NEGEMMLowpFinalizeKernel
+ *
 */
 class NEGEMMLowp : public IFunction
 {
@@ -58,7 +55,7 @@
     /** Initialise the kernel's inputs, output
     *
     * @note GEMM_LOWP:  low precision GEMM kernel
-    *  This kernel performs the following computation:
+    *  This kernel performs the following computations:
     *
     *  -# Convert a values from uint8 to int32 and add a_offset to each of them.
     *  -# Convert b values from uint8 to int32 and add b_offset to each of them.
@@ -72,35 +69,26 @@
     * @param[out] output          Output tensor. Data type supported: same as @p a.
     * @param[in]  a_offset        Offset to be added to each element of the matrix A.
     * @param[in]  b_offset        Offset to be added to each element of the matrix B.
-    * @param[in]  output_offset   Offset to be added to each element of the output matrix
+    * @param[in]  c_offset        Offset to be added to each element of the output matrix
     * @param[in]  output_mult_int Value to be multiplied to each element of the output matrix
     * @param[in]  shift           Number of bits to shift right the result.
     */
-    void configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
-    /** Initialise the kernel's inputs, output
-    *
-    * @note GEMM_LOWP:  low precision GEMM kernel
-    *  This kernel performs the following computation:
-    *
-    * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8.
-    * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
-    * @param[out] output Output tensor. Data type supported: U32.
-    */
-    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+    void configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t output_mult_int, int32_t shift);
 
     // Inherited methods overridden:
     void run() override;
 
 private:
-    MemoryGroup                                   _memory_group;
-    NEGEMMInterleave4x4Kernel                     _interleave_kernel;
-    NEGEMMTranspose1xWKernel                      _transpose_kernel;
-    NEGEMMLowpMatrixMultiplyKernel                _mm_kernel;
-    std::unique_ptr<NEGEMMLowpAssemblyBaseKernel> _mm_optimised_kernel;
-    NEGEMMInterleaveBlockedKernel                 _interleave_blocked;
-    NEGEMMInterleaveBlockedKernel                 _interleave_blocked_transposed;
-    Tensor                                        _tmp_a;
-    Tensor                                        _tmp_b;
+    MemoryGroup                      _memory_group;
+    NEGEMMLowpMatrixMultiplyCore     _mm_func;
+    NEGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel;
+    NEGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel;
+    NEGEMMLowpFinalizeKernel         _finalize_kernel;
+    Tensor                           _vector_sum_col;
+    Tensor                           _vector_sum_row;
+    Tensor                           _mm_output;
+    int32_t                          _a_offset;
+    int32_t                          _b_offset;
 };
 }
 #endif /*__ARM_COMPUTE_NEGEMMLOWP_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
new file mode 100644
index 0000000..4866e78
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__
+#define __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to execute GEMMLowpMatrixMultiplyCore on NEON. This function calls the following NEON kernels if the DOT product instruction is not available:
+ *
+ *  -# @ref NEGEMMInterleave4x4Kernel
+ *  -# @ref NEGEMMTranspose1xWKernel
+ *  -# @ref NEGEMMLowpMatrixMultiplyKernel
+ *
+ * otherwise if the DOT product instruction is available:
+ *
+ *  -# @ref NEGEMMInterleaveBlockedKernel
+ *  -# @ref NEGEMMLowpAArch64V8P4Kernel
+ *
+ */
+class NEGEMMLowpMatrixMultiplyCore : public IFunction
+{
+public:
+    /** Constructor */
+    NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Initialise the kernel's inputs, output
+    *
+    * @note GEMM_LOWP:  low precision GEMM kernel
+    *  This kernel performs the following computations:
+    *
+    *  -# Convert the values of matrix A from uint8 to int32
+    *  -# Convert the values of matrix B from uint8 to int32
+    *  -# Compute the int32 matrix product of the resulting a * b.
+    *
+    * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8.
+    * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
+    * @param[out] output Output tensor. Data type supported: S32
+    */
+    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    MemoryGroup                _memory_group;
+    std::unique_ptr<INEKernel> _mm_kernel;
+    std::unique_ptr<INEKernel> _mtx_a_reshape_kernel;
+    std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
+    Tensor                     _tmp_a;
+    Tensor                     _tmp_b;
+};
+}
+#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__ */