COMPMID-632: Integrate assembly S8 GEMM kernel for Arm Cortex-A53.

Change-Id: I07faa0c984759a1b5db1e5de71f4ab3eef5888d8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110334
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 3ad1931..7fb5f78 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -113,6 +113,7 @@
 #include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
 #include "arm_compute/core/NEON/kernels/arm32/NEGEMMAArch32Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEGEMMAArch64Kernel.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEHGEMMAArch64FP16Kernel.h"
diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.h
new file mode 100644
index 0000000..6f4ddea
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPAARCH64A53KERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPAARCH64A53KERNEL_H__
+
+#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
+
+// Enable only if compiled for AArch64-V8A targets
+#ifdef ARM_COMPUTE_AARCH64_V8A
+
+namespace arm_compute
+{
+class ITensor;
+
+/** AArch64 NEON kernel to multiply two input matrices "A" and "B", tuned for the Cortex-A53. */
+class NEGEMMLowpAArch64A53Kernel : public NEGEMMAssemblyBaseKernel
+{
+public:
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+protected:
+    void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) override;
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_AARCH64_V8A */
+#endif /*__ARM_COMPUTE_NEGEMMLOWPAARCH64A53KERNEL_H__*/
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8.hpp
new file mode 100644
index 0000000..f7659b9
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8.hpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+// Actual kernel implementations
+#include "a64_gemm_s16_12x8/generic.hpp"
+
+// 12x8 S16 GEMM "strategy" class.
+//
+// This describes the characteristics of a family of kernels, in terms of
+// the required interleave properties and the output block size.
+//
+// All kernels in the family must share these characteristics.  The actual
+// kernel to be used can be chosen at runtime, based on the CPUInfo
+// structure.
+class gemm_s16_12x8 {
+public:
+    typedef int16_t operand_type;
+    typedef int32_t result_type;
+
+    typedef void (*kern_type)(const int16_t *, const int16_t *, int32_t *, int, int, int);
+
+    /* Describes the data layout for A input */
+    static const int A_interleave = 8;
+    static const int A_block = 1;
+    static const int A_transpose = 0;
+
+    /* Same for B input */
+    static const int B_interleave = 12;
+    static const int B_block = 1;
+    static const int B_transpose = 1;
+
+    /* Kernel blocking parameters */
+    static const int out_width = 12;
+    static const int out_height = 8;
+    static const int k_unroll = 1;
+
+    kern_type kernel = nullptr;
+
+    gemm_s16_12x8(const CPUInfo *ci) {
+        kernel = a64_gemm_s16_asimd_12x8;
+    }
+};
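+
+// Usage sketch (illustrative, not part of the kernel itself): the
+// GemmInterleaved wrapper reads the constants above to size its interleave
+// buffers and pick the output tile shape, then calls the selected kernel:
+//
+//   GemmInterleaved<gemm_s16_12x8, int8_t, int32_t> gemm(&ci, M, N, K, trA, trB);
+//   gemm.execute(A, lda, B, ldb, C, ldc, alpha, beta, workspace);
+//
+// Instantiating with int8_t operands assumes the wrapper's transform stage
+// widens the 8-bit inputs to the int16_t values this strategy consumes, as in
+// the callers added by this patch.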
+
+#endif // __aarch64__
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8/generic.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8/generic.hpp
new file mode 100644
index 0000000..10259b2
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8/generic.hpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#include <arm_neon.h>
+
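+// 12x8 GEMM inner kernel on interleaved int16 panels, accumulating into int32.
+//
+// Apanel is interleaved in groups of 8 rows and Bpanel in groups of 12 columns
+// (matching the strategy constants); each xb iteration produces one 12x8 int32
+// output tile in Cpanel. K is consumed two steps per main-loop iteration, with
+// separate even-K and odd-K tails. The 64-bit "ldr"/"ldr x20"/"ins" sequences
+// split each 128-bit load in two, which suits the Cortex-A53's narrower load
+// path (the reason for this A53-specific variant).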
+inline void a64_gemm_s16_asimd_12x8(const int16_t *Apanel, const int16_t *Bpanel, int32_t *Cpanel, int ablocks, int bblocks, int K)
+{
+  const int16_t *a_ptr = Apanel;
+  int32_t *c_ptr = Cpanel;
+  for (int yb = 0; yb < ablocks; yb++)
+  {
+    const int16_t *a_ptr0 = a_ptr;
+    const int16_t *b_ptr = Bpanel;
+
+    for (int xb = 0; xb < bblocks; xb++)
+    {
+      a_ptr = a_ptr0;
+      const bool odd_k = K & 0x1;
+      int k = (K+1)/2 - 1;
+
+      register int16x8_t aa asm("v0");
+      register int16x8_t ab asm("v1");
+      register int16x8_t b0 asm("v2");
+      register int16x8_t b1 asm("v3");
+      register int16x8_t b2 asm("v4");
+
+      __asm __volatile (
+        "ldr %d[aa], [%x[a_ptr]]\n"  // Load A[A].lower
+        "movi v5.4s, #0\n"
+        "ldr x20, [%x[a_ptr], #0x08]\n"  // Load A[A].upper
+        "movi v6.4s, #0\n"
+        "ldr %d[b0], [%x[b_ptr]]\n"  // Load B[0].lower
+        "ins %[aa].d[1], x20\n"  // Merge A[A].lower and upper
+        "movi v7.4s, #0\n"
+        ASM_PREFETCH("[%[a_ptr], #64]")
+        "movi v8.4s, #0\n"
+        "ldr x20, [%x[b_ptr], #0x08]\n"  // Load B[0].upper
+        "movi v9.4s, #0\n"
+        ASM_PREFETCH("[%[b_ptr], #64]")
+        "movi v10.4s, #0\n"
+        "ldr %d[b1], [%x[b_ptr], #0x10]\n"  // Load B[1].lower
+        "ins %[b0].d[1], x20\n"  // Merge B[0].lower and upper
+        "movi v11.4s, #0\n"
+        ASM_PREFETCH("[%[a_ptr], #96]")
+        "movi v12.4s, #0\n"
+        "movi v13.4s, #0\n"
+        ASM_PREFETCH("[%[b_ptr], #96]")
+        "movi v14.4s, #0\n"
+        "movi v15.4s, #0\n"
+        ASM_PREFETCH("[%[a_ptr], #128]")
+        "movi v16.4s, #0\n"
+        "movi v17.4s, #0\n"
+        ASM_PREFETCH("[%[b_ptr], #128]")
+        "movi v18.4s, #0\n"
+        "movi v19.4s, #0\n"
+        ASM_PREFETCH("[%[a_ptr], #160]")
+        "movi v20.4s, #0\n"
+        "movi v21.4s, #0\n"
+        ASM_PREFETCH("[%[b_ptr], #160]")
+        "movi v22.4s, #0\n"
+        "movi v23.4s, #0\n"
+        ASM_PREFETCH("[%[a_ptr], #192]")
+        "movi v24.4s, #0\n"
+        "add %x[a_ptr], %x[a_ptr], #0x10\n"
+        "movi v25.4s, #0\n"
+        ASM_PREFETCH("[%[b_ptr], #192]")
+        "movi v26.4s, #0\n"
+        "add %x[b_ptr], %x[b_ptr], #0x18\n"
+        "movi v27.4s, #0\n"
+        "movi v28.4s, #0\n"
+
+        "cbz %x[k], 2f\n"  // Skip the loop if doing zero iterations.
+
+        "1:\n"  // Main loop
+          // First unroll
+          "smlal v5.4s, %[b0].4h, %[aa].h[0]\n"
+          "ldr x20, [%x[b_ptr]]\n"  // Load B[1].upper
+          "smlal v6.4s, %[b0].4h, %[aa].h[1]\n"
+          "smlal v7.4s, %[b0].4h, %[aa].h[2]\n"
+          "ldr %d[ab], [%x[a_ptr]]\n"  // Load A[B].lower
+          "ins %[b1].d[1], x20\n"  // Merge B[1].lower and .upper
+          "smlal v8.4s, %[b0].4h, %[aa].h[3]\n"
+          "smlal v9.4s, %[b0].4h, %[aa].h[4]\n"
+          "ldr x20, [%x[a_ptr], #0x8]\n"  // Load A[B].upper
+          "smlal v10.4s, %[b0].4h, %[aa].h[5]\n"
+          "smlal v11.4s, %[b0].4h, %[aa].h[6]\n"
+          "ldr %d[b2], [%x[b_ptr], #0x8]\n"  // Load B[2].lower
+          "ins %[ab].d[1], x20\n"  // Merge A[B].lower and .upper
+          "smlal v12.4s, %[b0].4h, %[aa].h[7]\n"
+          "smlal2 v13.4s, %[b0].8h, %[aa].h[0]\n"
+          "ldr x20, [%x[b_ptr], #0x10]\n"  // Load B[2].upper
+          "smlal2 v14.4s, %[b0].8h, %[aa].h[1]\n"
+          "smlal2 v15.4s, %[b0].8h, %[aa].h[2]\n"
+          "smlal2 v16.4s, %[b0].8h, %[aa].h[3]\n"
+          "smlal2 v17.4s, %[b0].8h, %[aa].h[4]\n"
+          "smlal2 v18.4s, %[b0].8h, %[aa].h[5]\n"
+          "smlal2 v19.4s, %[b0].8h, %[aa].h[6]\n"
+          "smlal2 v20.4s, %[b0].8h, %[aa].h[7]\n"
+          "ldr %d[b0], [%x[b_ptr], #0x18]\n"  // Load B[0].lower
+          "ins %[b2].d[1], x20\n"  // Merge B[2].lower and .upper
+          "smlal v21.4s, %[b1].4h, %[aa].h[0]\n"
+          "smlal v22.4s, %[b1].4h, %[aa].h[1]\n"
+          "ldr x20, [%x[b_ptr], #0x20]\n"  // Load B[0].upper
+          "smlal v23.4s, %[b1].4h, %[aa].h[2]\n"
+          "smlal v24.4s, %[b1].4h, %[aa].h[3]\n"
+          "smlal v25.4s, %[b1].4h, %[aa].h[4]\n"
+          "smlal v26.4s, %[b1].4h, %[aa].h[5]\n"
+          "smlal v27.4s, %[b1].4h, %[aa].h[6]\n"
+          "smlal v28.4s, %[b1].4h, %[aa].h[7]\n"
+
+          // Second unroll
+          "smlal2 v5.4s, %[b1].8h, %[ab].h[0]\n"
+          "ldr %d[aa], [%x[a_ptr], #0x10]\n"  // Load A[A].lower
+          "ins %[b0].d[1], x20\n"  // Merge B[0].lower and .upper
+          "smlal2 v6.4s, %[b1].8h, %[ab].h[1]\n"
+          "smlal2 v7.4s, %[b1].8h, %[ab].h[2]\n"
+          "ldr x20, [%x[a_ptr], #0x18]\n"  // Load A[A].upper
+          "smlal2 v8.4s, %[b1].8h, %[ab].h[3]\n"
+          "smlal2 v9.4s, %[b1].8h, %[ab].h[4]\n"
+          "smlal2 v10.4s, %[b1].8h, %[ab].h[5]\n"
+          "smlal2 v11.4s, %[b1].8h, %[ab].h[6]\n"
+          "add %x[a_ptr], %x[a_ptr], #0x20\n"
+          "smlal2 v12.4s, %[b1].8h, %[ab].h[7]\n"
+          "smlal v13.4s, %[b2].4h, %[ab].h[0]\n"
+          ASM_PREFETCH("[%[b_ptr], #320]")
+          "smlal v14.4s, %[b2].4h, %[ab].h[1]\n"
+          "smlal v15.4s, %[b2].4h, %[ab].h[2]\n"
+          ASM_PREFETCH("[%[a_ptr], #320]")
+          "smlal v16.4s, %[b2].4h, %[ab].h[3]\n"
+          "smlal v17.4s, %[b2].4h, %[ab].h[4]\n"
+          ASM_PREFETCH("[%[b_ptr], #448]")
+          "smlal v18.4s, %[b2].4h, %[ab].h[5]\n"
+          "smlal v19.4s, %[b2].4h, %[ab].h[6]\n"
+          "smlal v20.4s, %[b2].4h, %[ab].h[7]\n"
+          "smlal2 v21.4s, %[b2].8h, %[ab].h[0]\n"
+          "smlal2 v22.4s, %[b2].8h, %[ab].h[1]\n"
+          "subs %x[k], %x[k], #0x1\n"
+          "smlal2 v23.4s, %[b2].8h, %[ab].h[2]\n"
+          "smlal2 v24.4s, %[b2].8h, %[ab].h[3]\n"
+          "ldr %d[b1], [%x[b_ptr], #0x28]\n"  // Load B[1].lower
+          "ins %[aa].d[1], x20\n"  // Merge A[A].lower and .upper
+          "smlal2 v25.4s, %[b2].8h, %[ab].h[4]\n"
+          "smlal2 v26.4s, %[b2].8h, %[ab].h[5]\n"
+          "add %x[b_ptr], %x[b_ptr], #0x30\n"
+          "smlal2 v27.4s, %[b2].8h, %[ab].h[6]\n"
+          "smlal2 v28.4s, %[b2].8h, %[ab].h[7]\n"
+          "bne 1b\n"
+
+        "2:\n"  // Even tail
+          "cbnz %x[odd_k], 3f\n"
+
+          "smlal v5.4s, %[b0].4h, %[aa].h[0]\n"
+          "ldr x20, [%x[b_ptr]]\n"  // Load B[1].upper
+          "smlal v6.4s, %[b0].4h, %[aa].h[1]\n"
+          "smlal v7.4s, %[b0].4h, %[aa].h[2]\n"
+          "ldr %d[ab], [%x[a_ptr]]\n"  // Load A[B].lower
+          "ins %[b1].d[1], x20\n"  // Merge B[1].lower and .upper
+          "smlal v8.4s, %[b0].4h, %[aa].h[3]\n"
+          "smlal v9.4s, %[b0].4h, %[aa].h[4]\n"
+          "ldr x20, [%x[a_ptr], #0x8]\n"  // Load A[B].upper
+          "smlal v10.4s, %[b0].4h, %[aa].h[5]\n"
+          "smlal v11.4s, %[b0].4h, %[aa].h[6]\n"
+          "ldr %d[b2], [%x[b_ptr], #0x8]\n"  // Load B[2].lower
+          "ins %[ab].d[1], x20\n"  // Merge A[B].lower and .upper
+          "smlal v12.4s, %[b0].4h, %[aa].h[7]\n"
+          "smlal2 v13.4s, %[b0].8h, %[aa].h[0]\n"
+          "ldr x20, [%x[b_ptr], #0x10]\n"  // Load B[2].upper
+          "smlal2 v14.4s, %[b0].8h, %[aa].h[1]\n"
+          "smlal2 v15.4s, %[b0].8h, %[aa].h[2]\n"
+          "smlal2 v16.4s, %[b0].8h, %[aa].h[3]\n"
+          "add %[a_ptr], %[a_ptr], #0x10\n"
+          "smlal2 v17.4s, %[b0].8h, %[aa].h[4]\n"
+          "add %[b_ptr], %[b_ptr], #0x18\n"
+          "smlal2 v18.4s, %[b0].8h, %[aa].h[5]\n"
+          "smlal2 v19.4s, %[b0].8h, %[aa].h[6]\n"
+          "smlal2 v20.4s, %[b0].8h, %[aa].h[7]\n"
+          "ins %[b2].d[1], x20\n"  // Merge B[2].lower and .upper
+          "smlal v21.4s, %[b1].4h, %[aa].h[0]\n"
+          "smlal v22.4s, %[b1].4h, %[aa].h[1]\n"
+          "smlal v23.4s, %[b1].4h, %[aa].h[2]\n"
+          "smlal v24.4s, %[b1].4h, %[aa].h[3]\n"
+          "smlal v25.4s, %[b1].4h, %[aa].h[4]\n"
+          "smlal v26.4s, %[b1].4h, %[aa].h[5]\n"
+          "smlal v27.4s, %[b1].4h, %[aa].h[6]\n"
+          "smlal v28.4s, %[b1].4h, %[aa].h[7]\n"
+
+          "smlal2 v5.4s, %[b1].8h, %[ab].h[0]\n"
+          "smlal v13.4s, %[b2].4h, %[ab].h[0]\n"
+          "smlal2 v21.4s, %[b2].8h, %[ab].h[0]\n"
+          "smlal2 v6.4s, %[b1].8h, %[ab].h[1]\n"
+          "smlal v14.4s, %[b2].4h, %[ab].h[1]\n"
+          "str q5, [%x[c_ptr]]\n"
+          "smlal2 v22.4s, %[b2].8h, %[ab].h[1]\n"
+          "str q13, [%x[c_ptr], #0x10]\n"
+          "smlal2 v7.4s, %[b1].8h, %[ab].h[2]\n"
+          "str q21, [%x[c_ptr], #0x20]\n"
+          "smlal v15.4s, %[b2].4h, %[ab].h[2]\n"
+          "str q6, [%x[c_ptr], #0x30]\n"
+          "smlal2 v23.4s, %[b2].8h, %[ab].h[2]\n"
+          "str q14, [%x[c_ptr], #0x40]\n"
+          "smlal2 v8.4s, %[b1].8h, %[ab].h[3]\n"
+          "str q22, [%x[c_ptr], #0x50]\n"
+          "smlal v16.4s, %[b2].4h, %[ab].h[3]\n"
+          "str q7, [%x[c_ptr], #0x60]\n"
+          "smlal2 v24.4s, %[b2].8h, %[ab].h[3]\n"
+          "str q15, [%x[c_ptr], #0x70]\n"
+          "smlal2 v9.4s, %[b1].8h, %[ab].h[4]\n"
+          "str q23, [%x[c_ptr], #0x80]\n"
+          "smlal v17.4s, %[b2].4h, %[ab].h[4]\n"
+          "str q8, [%x[c_ptr], #0x90]\n"
+          "smlal2 v25.4s, %[b2].8h, %[ab].h[4]\n"
+          "str q16, [%x[c_ptr], #0xa0]\n"
+          "smlal2 v10.4s, %[b1].8h, %[ab].h[5]\n"
+          "str q24, [%x[c_ptr], #0xb0]\n"
+          "smlal v18.4s, %[b2].4h, %[ab].h[5]\n"
+          "str q9, [%x[c_ptr], #0xc0]\n"
+          "smlal2 v26.4s, %[b2].8h, %[ab].h[5]\n"
+          "str q17, [%x[c_ptr], #0xd0]\n"
+          "smlal2 v11.4s, %[b1].8h, %[ab].h[6]\n"
+          "str q25, [%x[c_ptr], #0xe0]\n"
+          "smlal v19.4s, %[b2].4h, %[ab].h[6]\n"
+          "str q10, [%x[c_ptr], #0xf0]\n"
+          "smlal2 v27.4s, %[b2].8h, %[ab].h[6]\n"
+          "str q18, [%x[c_ptr], #0x100]\n"
+          "smlal2 v12.4s, %[b1].8h, %[ab].h[7]\n"
+          "str q26, [%x[c_ptr], #0x110]\n"
+          "smlal v20.4s, %[b2].4h, %[ab].h[7]\n"
+          "str q11, [%x[c_ptr], #0x120]\n"
+          "smlal2 v28.4s, %[b2].8h, %[ab].h[7]\n"
+          "str q19, [%x[c_ptr], #0x130]\n"
+          "b 4f\n"  // Complete write out
+
+        "3:\n"  // Odd tail
+          "smlal v5.4s, %[b0].4h, %[aa].h[0]\n"
+          "smlal2 v13.4s, %[b0].8h, %[aa].h[0]\n"
+          "smlal v21.4s, %[b1].4h, %[aa].h[0]\n"
+          "smlal v6.4s, %[b0].4h, %[aa].h[1]\n"
+          "smlal2 v14.4s, %[b0].8h, %[aa].h[1]\n"
+          "smlal v22.4s, %[b1].4h, %[aa].h[1]\n"
+          "str q5, [%x[c_ptr]]\n"
+          "smlal v7.4s, %[b0].4h, %[aa].h[2]\n"
+          "str q13, [%x[c_ptr], #0x10]\n"
+          "smlal2 v15.4s, %[b0].8h, %[aa].h[2]\n"
+          "str q21, [%x[c_ptr], #0x20]\n"
+          "smlal v23.4s, %[b1].4h, %[aa].h[2]\n"
+          "str q6, [%x[c_ptr], #0x30]\n"
+          "smlal v8.4s, %[b0].4h, %[aa].h[3]\n"
+          "str q14, [%x[c_ptr], #0x40]\n"
+          "smlal2 v16.4s, %[b0].8h, %[aa].h[3]\n"
+          "str q22, [%x[c_ptr], #0x50]\n"
+          "smlal v24.4s, %[b1].4h, %[aa].h[3]\n"
+          "str q7, [%x[c_ptr], #0x60]\n"
+          "smlal v9.4s, %[b0].4h, %[aa].h[4]\n"
+          "str q15, [%x[c_ptr], #0x70]\n"
+          "smlal2 v17.4s, %[b0].8h, %[aa].h[4]\n"
+          "str q23, [%x[c_ptr], #0x80]\n"
+          "smlal v25.4s, %[b1].4h, %[aa].h[4]\n"
+          "str q8, [%x[c_ptr], #0x90]\n"
+          "smlal v10.4s, %[b0].4h, %[aa].h[5]\n"
+          "str q16, [%x[c_ptr], #0xa0]\n"
+          "smlal2 v18.4s, %[b0].8h, %[aa].h[5]\n"
+          "str q24, [%x[c_ptr], #0xb0]\n"
+          "smlal v26.4s, %[b1].4h, %[aa].h[5]\n"
+          "str q9, [%x[c_ptr], #0xc0]\n"
+          "smlal v11.4s, %[b0].4h, %[aa].h[6]\n"
+          "str q17, [%x[c_ptr], #0xd0]\n"
+          "smlal2 v19.4s, %[b0].8h, %[aa].h[6]\n"
+          "str q25, [%x[c_ptr], #0xe0]\n"
+          "smlal v27.4s, %[b1].4h, %[aa].h[6]\n"
+          "str q10, [%x[c_ptr], #0xf0]\n"
+          "smlal v12.4s, %[b0].4h, %[aa].h[7]\n"
+          "str q18, [%x[c_ptr], #0x100]\n"
+          "smlal2 v20.4s, %[b0].8h, %[aa].h[7]\n"
+          "str q26, [%x[c_ptr], #0x110]\n"
+          "smlal v28.4s, %[b1].4h, %[aa].h[7]\n"
+          "str q11, [%x[c_ptr], #0x120]\n"
+
+        "4:\n"  // End of function
+          "str q19, [%x[c_ptr], #0x130]\n"
+          "str q27, [%x[c_ptr], #0x140]\n"
+          "str q12, [%x[c_ptr], #0x150]\n"
+          "str q20, [%x[c_ptr], #0x160]\n"
+          "str q28, [%x[c_ptr], #0x170]\n"
+          "add %x[c_ptr], %x[c_ptr], #0x180\n"
+        : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr), [k] "+r" (k),
+          [aa] "+w" (aa), [ab] "+w" (ab), [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2)
+        : [odd_k] "r" (odd_k)
+        : "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "cc"
+      );
+    }
+  }
+}
diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.cpp
new file mode 100644
index 0000000..fe6e821
--- /dev/null
+++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8.hpp"
+} // namespace arm_compute
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+// Enable only if compiled for AArch64-V8A targets
+#ifdef ARM_COMPUTE_AARCH64_V8A
+
+namespace arm_compute
+{
+void NEGEMMLowpAArch64A53Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
+
+    _input0      = input0;
+    _input1      = input1;
+    _output      = output;
+    _workspace   = workspace;
+    _alpha       = alpha;
+    _beta        = beta;
+    _transform_0 = transform_0;
+    _transform_1 = transform_1;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output->info());
+
+    AccessWindowRectangle output_access(output->info(), 0, 0, 12, 8);
+
+    const int input0_access_end = ceil_to_multiple(input0->info()->tensor_shape().x(), 12);
+    const int input1_access_end = ceil_to_multiple(input1->info()->tensor_shape().x(), 12);
+
+    update_window_and_padding(win,
+                              AccessWindowStatic(input0->info(), 0, 0, input0_access_end, input0->info()->tensor_shape().y()),
+                              AccessWindowStatic(input1->info(), 0, 0, input1_access_end, input1->info()->tensor_shape().y()),
+                              output_access);
+
+    INEKernel::configure(win);
+}
+
+void NEGEMMLowpAArch64A53Kernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+    const int lda = _input0->info()->strides_in_bytes().y();
+    const int ldb = _input1->info()->strides_in_bytes().y();
+    const int ldc = _output->info()->strides_in_bytes().y() / sizeof(int32_t);
+
+    const auto in1_ptr = reinterpret_cast<const int8_t *>(_input1->buffer());
+
+    const int M = std::min(_output->info()->tensor_shape().y(), static_cast<size_t>(window.y().end())) - window.y().start();
+    const int N = _output->info()->tensor_shape().x();
+    const int K = _input0->info()->tensor_shape().x();
+
+    // Only iterate over batches
+    Window win(window);
+    win.set(0, Window::Dimension(0, 1, 1));
+    win.set(1, Window::Dimension(0, 1, 1));
+
+    Iterator in0(_input0, window);
+    Iterator out(_output, window);
+
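+    // The gemm_s16_12x8 strategy works on int16 operands; the int8 inputs are
+    // assumed to be widened to 16 bits by the GemmInterleaved transform stage,
+    // hence the <strategy, int8_t, int32_t> instantiation below.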
+    GemmInterleaved<gemm_s16_12x8, int8_t, int32_t> gemm(&info.cpu_info, M, N, K, !_transform_0, !_transform_1);
+
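+    // Give each thread its own slice of the shared workspace; the extra
+    // (alignment - 1) bytes per slice leave room for the align() call below.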
+    constexpr size_t alignment      = 4096;
+    const size_t     offset         = (gemm.get_working_size() + alignment - 1) * info.thread_id;
+    void            *workspace      = _workspace->buffer() + offset;
+    size_t           workspace_size = _workspace->info()->total_size();
+
+    if(support::cpp11::align(alignment, gemm.get_working_size(), workspace, workspace_size) == nullptr)
+    {
+        ARM_COMPUTE_ERROR("Not enough space to align buffer!");
+    }
+
+    execute_window_loop(win, [&](const Coordinates & id)
+    {
+        gemm.execute(reinterpret_cast<const int8_t *>(in0.ptr()), lda,
+                     reinterpret_cast<const int8_t *>(in1_ptr), ldb,
+                     reinterpret_cast<int32_t *>(out.ptr()), ldc,
+                     _alpha, _beta, workspace);
+    },
+    in0, out);
+}
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_AARCH64_V8A */
diff --git a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
index 1bf437e..0423777 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
@@ -29,6 +29,7 @@
 #include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64A53Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h"
 #include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
 #include "arm_compute/core/TensorInfo.h"
@@ -41,10 +42,10 @@
 namespace arm_compute
 {
 #include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s16_12x8.hpp"
 #include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp"
 #include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp"
 #include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp"
-
 } // namespace arm_compute
 
 using namespace arm_compute;
@@ -91,7 +92,19 @@
     }
     else
 #elif defined(ARM_COMPUTE_AARCH64_V8A)
-    if(1)
+    if(ci.CPU == CPUTarget::A53)
+    {
+        // Determine the workspace required by the interleaved GEMM
+        GemmInterleaved<gemm_s16_12x8, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
+        _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+        _memory_group.manage(&_workspace);
+        // Configure matrix multiplication kernel
+        auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64A53Kernel>();
+        k->configure(a, b, output, &_workspace, 1.f, 1.f);
+        _mm_kernel = std::move(k);
+        _workspace.allocator()->allocate();
+    }
+    else if(1) // Generic v8a kernel
     {
         switch(a->info()->data_type())
         {