COMPMID-2178: Update GEMM assembly code.

Perform offset reduction and requantization within the assembly wrapper.
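
For reference, a rough sketch of what the wrapper now folds in (names here are
illustrative, not the actual kernel symbols; a_offset/b_offset are the
quantization offsets of A and B, K the depth of the GEMM):

    acc(i,j) = sum_k (A(i,k) - a_offset) * (B(k,j) - b_offset)
             = sum_k A(i,k) * B(k,j)
               - b_offset * row_sum_A(i)
               - a_offset * col_sum_B(j)
               + K * a_offset * b_offset

    out(i,j) = saturate(round(acc(i,j) * result_multiplier) + result_offset)

The row/column sum corrections are the offset reduction, and the final
fixed-point scale and clamp is the requantization.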

Change-Id: I5d5b3e1f6f9ef4c71805362c57f88ff199c027a3
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1541
Comments-Addressed: Pablo Marquez <pablo.tello@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 3868284..dd23f1c 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -35,11 +35,10 @@
 
 namespace arm_compute
 {
+// Forward declarations
 class ITensor;
 
-/** Basic function to execute matrix multiply assembly kernels.
- *
-*/
+/** Basic function to execute matrix multiply assembly kernels. */
 class NEGEMMLowpAssemblyMatrixMultiplyCore : public IFunction
 {
 public:
@@ -49,9 +48,10 @@
      *
      * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8, S8.
      * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[in]  c      Third input tensor (Matrix C). Data type supported: same as @p a
      * @param[out] output Output tensor. Data type supported: U32, S32
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output);
 
     // Inherited methods overridden:
     void run() override;
@@ -65,5 +65,5 @@
     Tensor                     _tmp_a;
     Tensor                     _tmp_b;
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYMATRIXMULTIPLYCORE_H__ */
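
For anyone updating call sites, a minimal usage sketch of the new configure()
signature (tensor shapes, data types and whether a C matrix is actually needed
are illustrative only and depend on the caller):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor a, b, c, output;

        // M = 16, K = 32, N = 8 (TensorShape is width x height)
        a.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::U8));
        b.allocator()->init(TensorInfo(TensorShape(8U, 32U), 1, DataType::U8));
        c.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::U8));
        output.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::U32));

        NEGEMMLowpAssemblyMatrixMultiplyCore gemmlowp;
        gemmlowp.configure(&a, &b, &c, &output); // third argument is the new Matrix C input

        a.allocator()->allocate();
        b.allocator()->allocate();
        c.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill a, b and c with quantized data ...
        gemmlowp.run();
        return 0;
    }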