COMPMID-2178: Update GEMM assembly code.

Perform offset reduction and requantization within the assembly wrapper.
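
The quantized output stage carried through the wrapper is described by the new
ARequantizeLayer32 descriptor (bias, a_offset, b_offset, c_offset, requant_mul,
requant_shift, minval, maxval). As a rough scalar model of what the fused path
computes per output element (a minimal sketch only: requantize_element,
row_sum_a and col_sum_b are hypothetical names, the reduced accumulator is
assumed to fit in int32, and the assembly path uses saturating doubling-high
multiplies and may use different sign conventions for the offsets):

    #include <algorithm>
    #include <cstdint>

    // Hypothetical scalar model of the fused offset reduction + requantization.
    // acc       : raw int32 accumulator of A_q * B_q for element (row, col)
    // row_sum_a : sum of the K values in row 'row' of A_q
    // col_sum_b : sum of the K values in column 'col' of B_q
    inline int32_t requantize_element(int64_t acc, int32_t row_sum_a, int32_t col_sum_b,
                                      int32_t K, const int32_t *bias, int col,
                                      int32_t a_offset, int32_t b_offset, int32_t c_offset,
                                      int32_t requant_mul, int32_t requant_shift,
                                      int32_t minval, int32_t maxval)
    {
        // Offset reduction: expand sum_k (A_q - a_off)(B_q - b_off) around the raw product.
        acc -= static_cast<int64_t>(a_offset) * col_sum_b;
        acc -= static_cast<int64_t>(b_offset) * row_sum_a;
        acc += static_cast<int64_t>(a_offset) * b_offset * K;
        if(bias != nullptr)
        {
            acc += bias[col];
        }

        // Requantization: fixed-point multiply (Q0.31 multiplier), rounding right
        // shift, add the output offset, then clamp to [minval, maxval].
        int64_t prod   = static_cast<int64_t>(static_cast<int32_t>(acc)) * requant_mul;
        int32_t scaled = static_cast<int32_t>((prod + (INT64_C(1) << 30)) >> 31);
        if(requant_shift > 0)
        {
            scaled = (scaled + (1 << (requant_shift - 1))) >> requant_shift;
        }
        return std::max(minval, std::min(maxval, scaled + c_offset));
    }

Callers select the fused path by passing such a descriptor through the new
OutputStage template parameter of get_gemm_method()/gemm()/get_compatible_kernels(),
and supply the bias pointer via IGemmCommon::set_quantized_bias().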

Change-Id: I5d5b3e1f6f9ef4c71805362c57f88ff199c027a3
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1541
Comments-Addressed: Pablo Marquez <pablo.tello@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 1b511ba..828b0f2 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -39,7 +39,9 @@
     GEMV_NATIVE_TRANSPOSED,
     GEMM_NATIVE,
     GEMM_HYBRID,
-    GEMM_INTERLEAVED
+    GEMM_INTERLEAVED,
+    QUANTIZE_WRAPPER,
+    GEMM_HYBRID_QUANTIZED
 };
 
 struct KernelDescription
@@ -86,98 +88,52 @@
              const unsigned int nmulti, const bool trA, const bool trB,
              const T alpha, const T beta, const int maxthreads,
              const bool pretransposed_hint, const GemmConfig *cfg=nullptr ) :
-            _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
-            _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
-            _pretransposed_hint(pretransposed_hint), _cfg(cfg)
+             _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
+             _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
+             _pretransposed_hint(pretransposed_hint), _cfg(cfg)
     {
     }
 };
 
+struct ARequantizeLayer32
+{
+public:
+    const int32_t  *bias;
+    int32_t         a_offset;
+    int32_t         b_offset;
+    int32_t         c_offset;
+    int32_t         requant_shift;
+    int32_t         requant_mul;
+    int32_t         minval;
+    int32_t         maxval;
+
+    ARequantizeLayer32() = default;
+
+    ARequantizeLayer32(int32_t *b, int32_t ao, int32_t bo, int32_t co, int32_t rs, int32_t rm, int32_t minv, int32_t maxv) :
+        bias(b), a_offset(ao), b_offset(bo), c_offset(co), requant_shift(rs), requant_mul(rm), minval(minv), maxval(maxv)
+    {
+    }
+};
+
+struct Nothing
+{
+};
+
 template<typename Top, typename Tret>
 using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
 
 /* Low level API calls.
  * These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */
 
-/* method_is_compatible(): Can a GEMM of the templated types with the
- * provided parameters be provided using the supplied method?  */
-
-template<typename Top, typename Tret>
-bool method_is_compatible(GemmMethod method, const GemmArgs<Tret> &args);
-
-template<typename Top, typename Tret>
-bool method_is_compatible(GemmMethod method, const CPUInfo &ci,
-                          const unsigned int M, const unsigned int N, const unsigned int K,
-                          const unsigned int nbatches, const unsigned int nmulti,
-                          const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                          const int maxthreads, const bool pretransposed_hint)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
-
-    return method_is_compatible<Top, Tret>(method, args);
-}
-
 /* get_gemm_method(): Given the templated types and provided parameters,
  * which is the preferred method to implement this GEMM?  */
-template<typename Top, typename Tret>
-KernelDescription get_gemm_method(const GemmArgs<Tret> &args);
+template<typename Top, typename Tret, class OutputStage = Nothing>
+KernelDescription get_gemm_method(const GemmArgs<Tret> &args, const OutputStage & ={});
 
-template<typename Top, typename Tret>
-KernelDescription get_gemm_method(const CPUInfo &ci,
-                                  const unsigned int M, const unsigned int N, const unsigned int K,
-                                  const unsigned int nbatches, const unsigned int nmulti,
-                                  const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                  const int maxthreads, const bool pretransposed_hint)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
+template<typename Top, typename Tret, class OutputStage = Nothing>
+UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args, const OutputStage & ={});
 
-    return get_gemm_method<Top, Tret>(args);
-}
-
-template<typename Top, typename Tret>
-UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args);
-
-/** Request an object to process a GEMM.
- *
- * @param[in]  ci                 Describes CPU properties.
- * @param[in]  M                  Rows in output matrix C (and input matrix A).
- * @param[in]  N                  Columns in output matrix C (and input matrix B).
- * @param[in]  K                  Columns of input matrix A (= rows of input matrix B).
- * @param[in]  nbatches           Number of "batched" GEMMs (unique A and C, shared B).
- * @param[in]  nmulti             Number of "multi" GEMMs (unique A, B and C).
- * @param[in]  trA                Does A tensor has rows and columns transposed?
- * @param[in]  trB                Does B tensor has rows and columns transposed?
- * @param[in]  alpha              Scalar multiplier to apply to AB matrix product.
- * @param[in]  beta               Scalar multiplier to apply to input C matrix before adding product.
- * @param[in]  maxthreads         Maximum (and default) number of threads that will call execute method.
- * @param[in]  pretransposed_hint Can the B tensor can be pretransposed (ie shared across invocations)?
- * @param[in]  cfg                (optional) configuration parameters
- */
-template<typename Top, typename Tret>
-UniqueGemmCommon<Top, Tret> gemm(const CPUInfo &ci,
-                                 const unsigned int M, const unsigned int N, const unsigned int K,
-                                 const unsigned int nbatches, const unsigned int nmulti,
-                                 const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                 const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
-
-    return gemm<Top, Tret>(args);
-}
-
-template<typename Top, typename Tret>
-std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args);
-
-template<typename Top, typename Tret>
-std::vector<KernelDescription> get_compatible_kernels(const CPUInfo &ci,
-                                                      const unsigned int M, const unsigned int N, const unsigned int K,
-                                                      const unsigned int nbatches, const unsigned int nmulti,
-                                                      const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                                      const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
-
-    return get_compatible_kernels<Top, Tret>(args);
-}
+template<typename Top, typename Tret, class OutputStage = Nothing>
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args, const OutputStage & ={});
 
 } // namespace arm_gemm
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
index bb32fea..f59a617 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
@@ -46,7 +46,7 @@
      */
     virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                                     const void *B, const int ldb, /* batches share B */     const int B_multi_stride,
-                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
+                                          void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
 
     /* For threading, we divide the work into some number of units and work
      * out internally what unit corresponds to what work.  This returns the
@@ -93,6 +93,10 @@
     /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
     virtual void set_pretransposed_B_data(void *) { }
 
+    /*** "Quantized bias" interface (optional) ***/
+    /* Set the bias vector for quantized GEMMs */
+    virtual void set_quantized_bias(const int32_t *bias) { }
+
     // Destructor
     virtual ~IGemmCommon() { }
 };
@@ -125,7 +129,7 @@
      * strides (templated version with appropriate types). */
     virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const To *B, const int ldb, /* batches share B */     const int B_multi_stride,
-                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
+                                  Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
         _Aptr = A;
         _lda = lda;
         _A_batch_stride = A_batch_stride;
@@ -142,7 +146,7 @@
     /* Implementation of the void * overload which casts its arguments to the appropriate type. */
     void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const void *B, const int ldb, /* batches share B */     const int B_multi_stride,
-                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
+                                  void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
         set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
                    static_cast<const To *>(B), ldb, B_multi_stride,
                    static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
index b5a2978..ec4f700 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -67,6 +67,7 @@
      * @param[in]  method    GemmMethod to use to perform the matrix multiplication.
      * @param[in]  a         Input tensor (Matrix A).
      * @param[in]  b         Input tensor (Matrix B).
+     * @param[in]  c         Input tensor (Matrix C) used to pass the bias for quantized calculations
      * @param[out] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
      * @param[in]  alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in]  beta      Scalar multiplier to apply to input D matrix before adding product.
@@ -74,7 +75,7 @@
      *
      * @return True if the method is supported and the function was successfully created, false otherwise.
      */
-    bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
 
     /** Interface for the arm_gemm fallback */
     std::unique_ptr<IFallback>      _arm_gemm;
@@ -85,17 +86,19 @@
      *
      * @param[in]  a         Input tensor (Matrix A)
      * @param[in]  b         Input tensor (Matrix B)
+     * @param[in]  c         Input tensor (Matrix C) used to pass the bias for quantized calculations
      * @param[out] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
      * @param[in]  alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in]  beta      Scalar multiplier to apply to input D matrix before adding product.
      * @param[in]  gemm_info GEMM meta-data
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
 
     /** Indicates whether or not this function can be used to process the given parameters.
      *
-     * @param[in] a         Input tensor (Matrix A)
-     * @param[in] b         Input tensor (Matrix B)
+     * @param[in] a         Input tensor info (Matrix A)
+     * @param[in] b         Input tensor info (Matrix B)
+     * @param[in] c         Input tensor info (Matrix C) used to pass the bias for quantized calculations
      * @param[in] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
      * @param[in] alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in] beta      Scalar multiplier to apply to input D matrix before adding product.
@@ -103,7 +106,7 @@
      *
      * @return a status.
      */
-    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
     /** Was the function successfully configured ?
      *
      * @return True if the function is configured and ready to run
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 3868284..dd23f1c 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -35,11 +35,10 @@
 
 namespace arm_compute
 {
+// Forward declarations
 class ITensor;
 
-/** Basic function to execute matrix multiply assembly kernels.
- *
-*/
+/** Basic function to execute matrix multiply assembly kernels. */
 class NEGEMMLowpAssemblyMatrixMultiplyCore : public IFunction
 {
 public:
@@ -49,9 +48,10 @@
      *
      * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8, S8.
      * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[in]  c      Third input tensor (Matrix C). Data type supported: same as @p a
     * @param[out] output Output tensor. Data type supported: U32, S32
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output);
 
     // Inherited methods overridden:
     void run() override;
@@ -65,5 +65,5 @@
     Tensor                     _tmp_a;
     Tensor                     _tmp_b;
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index d3b27e4..5b6a0dd 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -122,7 +122,8 @@
     int32_t                                       _a_offset;
     int32_t                                       _b_offset;
     bool                                          _run_vector_matrix_multiplication;
-    bool                                          _dot_product_path;
+    bool                                          _assembly_path;
+    bool                                          _fused_assembly_path;
     bool                                          _reshape_b_only_on_first_run;
     bool                                          _is_prepared;
     bool                                          _fuse_output_stage;