Update CPU kernels to remove use of x19 and w19

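The inline assembly in these kernels used x19/w19 as one of its scratch
registers and named it in the clobber list. The scratch registers are
renumbered (x19..x27 become x20..x28 in the kernel shown below) so that
x19/w19 is no longer referenced or clobbered; x19 is callee-saved under
AAPCS64 and may be reserved by the compiler in some build configurations.

A minimal, hypothetical sketch of the renumbering pattern follows. It uses
plain AArch64 inline assembly rather than the SME code in the real kernels,
and the helper names are illustrative only, not part of ComputeLibrary:

    #include <cstdint>

    // Before: the scratch register sequence started at x19, so x19 had to
    // appear in the clobber list even though it is callee-saved.
    static inline uint64_t copy_then_add_old(uint64_t base, uint64_t offset)
    {
        uint64_t out;
        __asm__ __volatile__(
            "mov x19, %x[base]\n"
            "add %x[out], x19, %x[offset]\n"
            : [out] "=&r" (out)
            : [base] "r" (base), [offset] "r" (offset)
            : "cc", "x19");
        return out;
    }

    // After: the same sequence with the scratch register renumbered, so
    // x19 no longer appears in the body or the clobber list.
    static inline uint64_t copy_then_add_new(uint64_t base, uint64_t offset)
    {
        uint64_t out;
        __asm__ __volatile__(
            "mov x20, %x[base]\n"
            "add %x[out], x20, %x[offset]\n"
            : [out] "=&r" (out)
            : [base] "r" (base), [offset] "r" (offset)
            : "cc", "x20");
        return out;
    }

In the diff below the same shift also changes the emitted .inst words,
since the base and offset registers are encoded directly into the SME
load/store instruction encodings.
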
Resolves: COMPMID-5805

Change-Id: Idf720bbb136474810086f5089c5ed23b3f79835a
Signed-off-by: Michael Tyler <michael.tyler@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9081
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
index 61fed43..14ee5d6 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -10,16 +10,16 @@
  * sell copies of the Software, and to permit persons to whom the Software is
  * furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #if defined(__ARM_FEATURE_SVE)
@@ -34,66 +34,66 @@
 
   __asm__ __volatile__(
       ".inst 0xd503477f  // SMSTART ZA\n"
+      "cnth x28\n"
+      "cmp %x[height], x28\n"
       "cnth x27\n"
-      "cmp %x[height], x27\n"
-      "cnth x26\n"
-      "csel x27, %x[height], x27, LT\n"
-      "mov x25, #0x0\n"
+      "csel x28, %x[height], x28, LT\n"
+      "mov x26, #0x0\n"
       "ptrue p13.s\n"
-      "sub x27, x27, #0x1\n"
+      "sub x28, x28, #0x1\n"
       "whilelt p12.h, XZR, %x[height]\n"
-      "whilelt p11.h, x26, %x[height]\n"
-      "mov x24, %x[row_offset]\n"
-      "mov x23, %x[out]\n"
-      "whilelt p10.h, x25, %x[width]\n"
-      "whilelt p9.h, x25, %x[width]\n"
-      "whilelt p8.h, x25, %x[width]\n"
+      "whilelt p11.h, x27, %x[height]\n"
+      "mov x25, %x[row_offset]\n"
+      "mov x24, %x[out]\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
       "1:"  // Width loop
-      "add x22, %x[in], XZR, LSL #3\n"
-      "add x19, %x[in], x26, LSL #3\n"
-      "ldr x21, [x22], #0x8\n"
+      "add x23, %x[in], XZR, LSL #3\n"
+      "add x20, %x[in], x27, LSL #3\n"
+      "ldr x22, [x23], #0x8\n"
       "mov x12, #0x0\n"
-      "ldr x20, [x19], #0x8\n"
-      "cbz x27, 3f\n"
+      "ldr x21, [x20], #0x8\n"
+      "cbz x28, 3f\n"
       "2:"  // Loads: Loop
       ".inst 0x25286581  // psel p1.h, p9.h/Z, p12.h[w12]\n"
       ".inst 0x25286160  // psel p0.h, p8.h/Z, p11.h[w12]\n"
-      ".inst 0xe05806a0  // ld1h { za0h.h[x12] }, p1/Z, [x21, x24, LSL #1]\n"
-      "ldr x21, [x22], #0x8\n"
-      ".inst 0xe0580288  // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
+      ".inst 0xe05906c0  // ld1h { za0h.h[x12] }, p1/Z, [x22, x25, LSL #1]\n"
+      "ldr x22, [x23], #0x8\n"
+      ".inst 0xe05902a8  // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
       "add x12, x12, #0x2\n"
-      "cmp x12, x27, LSL #1\n"
-      "ldr x20, [x19], #0x8\n"
+      "cmp x12, x28, LSL #1\n"
+      "ldr x21, [x20], #0x8\n"
       "blt 2b\n"
       "3:"  // Loads: Tail
-      "sub x19, %x[width], x25\n"
+      "sub x20, %x[width], x26\n"
       ".inst 0x25286580  // psel p0.h, p9.h/Z, p12.h[w12]\n"
-      ".inst 0xe05802a0  // ld1h { za0h.h[x12] }, p0/Z, [x21, x24, LSL #1]\n"
+      ".inst 0xe05902c0  // ld1h { za0h.h[x12] }, p0/Z, [x22, x25, LSL #1]\n"
       ".inst 0x25286160  // psel p0.h, p8.h/Z, p11.h[w12]\n"
-      "cmp x19, x26\n"
-      ".inst 0xe0580288  // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
+      "cmp x20, x27\n"
+      ".inst 0xe05902a8  // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
       "mov x12, #0x0\n"
-      "csel x19, x19, x26, LT\n"
+      "csel x20, x20, x27, LT\n"
       "4:"  // Stores: Loop
       ".inst 0x25287540  // psel p0.h, p13.h/Z, p10.h[w12]\n"
-      ".inst 0xe07f82e0  // st1h { za0v.h[x12] }, p0/Z, [x23, XZR, LSL #1]\n"
+      ".inst 0xe07f8300  // st1h { za0v.h[x12] }, p0/Z, [x24, XZR, LSL #1]\n"
       ".inst 0x25287540  // psel p0.h, p13.h/Z, p10.h[w12]\n"
-      ".inst 0xe07a82e8  // st1h { za1v.h[x12] }, p0/Z, [x23, x26, LSL #1]\n"
+      ".inst 0xe07b8308  // st1h { za1v.h[x12] }, p0/Z, [x24, x27, LSL #1]\n"
       "add x12, x12, #0x1\n"
-      "cmp x12, x19\n"
-      "addvl x23, x23, #4\n"
+      "cmp x12, x20\n"
+      "addvl x24, x24, #4\n"
       "blt 4b\n"
+      "inch x26\n"
+      "whilelt p10.h, x26, %x[width]\n"
+      "whilelt p9.h, x26, %x[width]\n"
+      "whilelt p8.h, x26, %x[width]\n"
       "inch x25\n"
-      "whilelt p10.h, x25, %x[width]\n"
-      "whilelt p9.h, x25, %x[width]\n"
-      "whilelt p8.h, x25, %x[width]\n"
-      "inch x24\n"
       "b.any 1b\n"
-      "mov %x[out], x23\n"
+      "mov %x[out], x24\n"
       ".inst 0xd503467f  // SMSTOP\n"
       : [out] "+&r" (out)
       : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
-      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+      : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
     );
 }