COMPMID-2178: Update GEMM assembly code.

Perform offset reduction and requantization within the assembly wrapper.

Change-Id: I5d5b3e1f6f9ef4c71805362c57f88ff199c027a3
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1541
Comments-Addressed: Pablo Marquez <pablo.tello@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
index b5a2978..ec4f700 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -67,6 +67,7 @@
      * @param[in]  method    GemmMethod to use to perform the matrix multiplication.
      * @param[in]  a         Input tensor (Matrix A).
      * @param[in]  b         Input tensor (Matrix B).
+     * @param[in]  c         Input tensor (Matrix C) used to pass the bias for quantized calculations
     * @param[out] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
      * @param[in]  alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in]  beta      Scalar multiplier to apply to input D matrix before adding product.
@@ -74,7 +75,7 @@
      *
      * @return True if the method is supported and the function was successfully created, false otherwise.
      */
-    bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
 
     /** Interface for the arm_gemm fallback */
     std::unique_ptr<IFallback>      _arm_gemm;
@@ -85,17 +86,19 @@
      *
      * @param[in]  a         Input tensor (Matrix A)
      * @param[in]  b         Input tensor (Matrix B)
+     * @param[in]  c         Input tensor (Matrix C) used to pass the bias for quantized calculations
     * @param[out] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
      * @param[in]  alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in]  beta      Scalar multiplier to apply to input D matrix before adding product.
      * @param[in]  gemm_info GEMM meta-data
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
 
     /** Indicates whether or not this function can be used to process the given parameters.
      *
-     * @param[in] a         Input tensor (Matrix A)
-     * @param[in] b         Input tensor (Matrix B)
+     * @param[in] a         Input tensor info (Matrix A)
+     * @param[in] b         Input tensor info (Matrix B)
+     * @param[in] c         Input tensor info (Matrix C) used to pass the bias for quantized calculations
     * @param[in] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
      * @param[in] alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in] beta      Scalar multiplier to apply to input D matrix before adding product.
@@ -103,7 +106,7 @@
      *
      * @return a status.
      */
-    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
     /** Was the function successfully configured ?
      *
      * @return True if the function is configured and ready to run
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 3868284..dd23f1c 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -35,11 +35,10 @@
 
 namespace arm_compute
 {
+// Forward declarations
 class ITensor;
 
-/** Basic function to execute matrix multiply assembly kernels.
- *
-*/
+/** Basic function to execute matrix multiply assembly kernels. */
 class NEGEMMLowpAssemblyMatrixMultiplyCore : public IFunction
 {
 public:
@@ -49,9 +48,10 @@
      *
      * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8, S8.
      * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[in]  c      Third input tensor (Matrix C). Data type supported: same as @p a
     * @param[out] output Output tensor. Data type supported: U32, S32
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output);
 
     // Inherited methods overridden:
     void run() override;
@@ -65,5 +65,5 @@
     Tensor                     _tmp_a;
     Tensor                     _tmp_b;
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index d3b27e4..5b6a0dd 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -122,7 +122,8 @@
     int32_t                                       _a_offset;
     int32_t                                       _b_offset;
     bool                                          _run_vector_matrix_multiplication;
-    bool                                          _dot_product_path;
+    bool                                          _assembly_path;
+    bool                                          _fused_assembly_path;
     bool                                          _reshape_b_only_on_first_run;
     bool                                          _is_prepared;
     bool                                          _fuse_output_stage;